F.F.FF.....F.FFFFFF..FFFFFFFFFF...FFFF.FFFF.........FFFFF.F..FFFFFs..FFF [  2%]
.FFFFFFF...F.FFFFFFF.....Fs.F.s.F......F.......F....F.FF.FFF....F....F.. [  5%]
F...FFFFF..FF..F.F.FFFFFFFFFFFFFFsF..F.FFF..FF.FFF.FFF....FFF.FFFFFF.F.. [  8%]
FF.......F.....F.FFF..FFFFFFFFFFFFFssss.F..FFFFs.FFFF.FFFFF.FFFF.F.FFFss [ 11%]
ssssFF.FFF.F....FFF..F........FFsFF.FFFFFFF...F....FFFFsF.FFFF..FFFFFF.. [ 14%]
.FFFFFFFFFF.FFs...FF..FF.....FFsFF.FFsssFFFF..FF.F.FF..FFFF.F........FFF [ 17%]
...F...FF.F..FF.FFF..FFFF..FFF....sFFFFFF.FF.FFFFF.FF.F.....F.....FF.FFF [ 20%]
.......F...FF.FF............F.......FF.FF...F.F..FFF.FF.FFFFFFF.FF.F..FF [ 23%]
FFFF.F.F........F......F..........FFFF.......F.F...F..F.....F..FF.F..... [ 26%]
....FF...FFF.FFF.s.sFFFFFFFFF..FF.FF..FFFFFFFFFFFFsFFFFFFFFFFFF...FFssFF [ 28%]
FFFFFFFFFFF.FFF...............FFFFFFFFFFFF.FFFF..F....F....FF..F.F....FF [ 31%]
.FF.FFF...FFFFFFF..F...F..F.F.....FFFFFF....F.....F.FFF.......FF..FFF..F [ 34%]
F.....F......FFF...FFFF..FFFFF.FFFFFF.FFFF.F.F..FF....F....FF..F.....FF. [ 37%]
.FFF..FFFFFF..F.FFFF..FF..F.FFFF.FFFFFFFFF.....FFF...FFFFFFF.FFF...FFFF. [ 40%]
F.ssFF...F..F.FFF.FFF.FF......FFFFF.F.FFFss.F.....F.FF..FFFFF....F.F.F.. [ 43%]
F..FF.FFFFF.......F....FF...FFFFFF.F...F.F...FFF.F..FF....FF.FF.FFFF.FFF [ 46%]
FFFFFFFF.....................FF.FF.F....F..F.F.F.FFF..FF.F........F.FFFF [ 49%]
.FF.F.F.F.....F.F.....F...FF..FFF.FFF...F.F..F...FF.....F....F.F..FFF.Fs [ 52%]
.....................FFFF......FFFF..F...F.Fsss...F..FF......FF.FF...... [ 55%]
..FF....FFFF.FF...F.FFF...FF...F.FFFFFFFFFFFFFFF.FsF....s..FFF.........F [ 57%]
F...F....FF......FFFF.....FF...FFFFF..F..F.......F..FF.F.FFFFF..FFFFFFFF [ 60%]
FF.sF.s.FF.F.F...FF..FF.FFFFF.FFFFF.F..FFFFFF.F.FFFFFFFFF.F....FFF.F.FFF [ 63%]
FFFF.F..FFFFFFFFFF...F...F...FFF....FFF.FF.FF.F....F..FF................ [ 66%]
.F......sF.....FF....FF.FFFFFFF....F...FFF.....F..FF.sFFFFFFFFFFFF.F.... [ 69%]
..F.F..F..FF..F...FFFFF...FFFF.FsF...FF..F.F...FFFFFF....F.FFFF.FFFFFFFF [ 72%]
FFFFFFFs..F.....FF.FFFFFFFFsFFFFFFFFFFF..F.FF..FFF.FFFFF..s.FFFF....F... [ 75%]
.....F..............F......F....FF.......F...........sFsF..Fs..Fsss..... [ 78%]
.F.F.F..F.F.FFs..F.F.F....FF.....F.FFF.FF.F.FFF.F.FFFFFFF.....FFF.....F. [ 81%]
F..FF..F.F.F.FFFFFF..F......F...sFFFFFFFFF...F......F....FFFFFF....FFF.. [ 83%]
..............F.F.FFFF.....FFFFF.FF.F..F..FFF..s.FFFFF..F.FFFFF..F....F. [ 86%]
..FF..FF.F...F..FF....FF...FFFFFFF.FFFF..FFFFF..F...FFF..FFF.F.FF.FF...F [ 89%]
.F..F....FFFFF...F.FFF.........F...F.F...F.FFFFF.F.....F..FFFFFF.FF.FFFF [ 92%]
.F..FsFFsssssssssFFF.Fss..FFFFFFFFF.FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [ 95%]
.FFFFFFFFFFFFF.F.F.FFFFFFFFFFFFFFFFFFFFFFF.FFFFFFFFFFFFFFFFFFFFFFF.FFFFF [ 98%]
FssFsFFF....FsFFFsF..F.FsFFFsF.sFFFFFFF                                  [100%]
=================================== FAILURES ===================================
___ TestBashSpecTests.test_spec_case[alias.test.sh::Usage of builtins[L10]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x108dd4740>
test_file = 'alias.test.sh'
test_case = TestCase(name='Usage of builtins', script="shopt -s expand_aliases || true\nalias -- foo=echo\necho status=$?\nfoo x\n...riant=None), Assertion(type='stdout', value='status=1\nx', shells=['dash'], variant='BUG')], line_number=10, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Usage of builtins (line 10)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: 'status=0\nx'
E           Actual stdout:   'status=0\nx\n'
E           Expected stderr: None
E           Actual stderr:   'bash: foo: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           shopt -s expand_aliases || true
E           alias -- foo=echo
E           echo status=$?
E           foo x
E           unalias -- foo
E           foo x
E           # dash doesn't accept --
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::define and use alias on a single line[L40]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x108e28290>
test_file = 'alias.test.sh'
test_case = TestCase(name='define and use alias on a single line', script='shopt -s expand_aliases\nalias e=echo; e one  # this is...hree', assertions=[Assertion(type='stdout', value='two\nthree', shells=None, variant=None)], line_number=40, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: define and use alias on a single line (line 40)
E           
E           stdout mismatch:
E             expected: 'two\nthree'
E             actual:   'one\ntwo\nthree'
E           
E           Expected stdout: 'two\nthree'
E           Actual stdout:   'one\ntwo\nthree\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias e=echo; e one  # this is not alias-expanded because we parse lines at once
E           e two; e three
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::defining multiple aliases, then unalias[L56]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109041cd0>
test_file = 'alias.test.sh'
test_case = TestCase(name='defining multiple aliases, then unalias', script="shopt -s expand_aliases  # bash requires this\nx=x\ny...t', value='status=0\nx X\ny Y\nstatus=0\nundefined\nundefined', shells=None, variant=None)], line_number=56, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: defining multiple aliases, then unalias (line 56)
E           
E           stdout mismatch:
E             expected: 'status=0\nx X\ny Y\nstatus=0\nundefined\nundefined'
E             actual:   'status=0\n$x X\n$y Y\nstatus=0\nundefined\nundefined'
E           
E           Expected stdout: 'status=0\nx X\ny Y\nstatus=0\nundefined\nundefined'
E           Actual stdout:   'status=0\n$x X\n$y Y\nstatus=0\nundefined\nundefined\n'
E           Expected stderr: None
E           Actual stderr:   'bash: echo-x: command not found\nbash: echo-y: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           x=x
E           y=y
E           alias echo-x='echo $x' echo-y='echo $y'
E           echo status=$?
E           echo-x X
E           echo-y Y
E           unalias echo-x echo-y
E           echo status=$?
E           echo-x X || echo undefined
E           echo-y Y || echo undefined
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[alias.test.sh::alias not defined[L77]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109041d90>
test_file = 'alias.test.sh'
test_case = TestCase(name='alias not defined', script="alias e='echo' nonexistentZ\necho status=$?", assertions=[Assertion(type='s...e='stdout', value='nonexistentZ alias not found\nstatus=1', shells=['mksh'], variant='OK')], line_number=77, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: alias not defined (line 77)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'bash: alias: nonexistentZ: not found\nstatus=1'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'bash: alias: nonexistentZ: not found\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           alias e='echo' nonexistentZ
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::alias with trailing space causes alias expansion on second word[L161]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042210>
test_file = 'alias.test.sh'
test_case = TestCase(name='alias with trailing space causes alias expansion on second word', script="shopt -s expand_aliases  # ba...ion(type='stdout', value='hello world !!!\nhello world punct', shells=None, variant=None)], line_number=161, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: alias with trailing space causes alias expansion on second word (line 161)
E           
E           stdout mismatch:
E             expected: 'hello world !!!\nhello world punct'
E             actual:   'hello world !!! punct\nhello world punct'
E           
E           Expected stdout: 'hello world !!!\nhello world punct'
E           Actual stdout:   'hello world !!! punct\nhello world punct\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           
E           alias hi='echo hello world '
E           alias punct='!!!'
E           
E           hi punct
E           
E           alias hi='echo hello world'  # No trailing space
E           
E           hi punct
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Recursive alias expansion of SECOND word[L188]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042390>
test_file = 'alias.test.sh'
test_case = TestCase(name='Recursive alias expansion of SECOND word', script="shopt -s expand_aliases  # bash requires this\nalias...ertions=[Assertion(type='stdout', value='one TWO hello world', shells=None, variant=None)], line_number=188, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Recursive alias expansion of SECOND word (line 188)
E           
E           stdout mismatch:
E             expected: 'one TWO hello world'
E             actual:   'one TWO two hello world'
E           
E           Expected stdout: 'one TWO hello world'
E           Actual stdout:   'one TWO two hello world\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           alias one='ONE '
E           alias two='TWO '
E           alias e_='echo one '
E           e_ two hello world
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Expansion of alias with variable[L199]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042450>
test_file = 'alias.test.sh'
test_case = TestCase(name='Expansion of alias with variable', script="shopt -s expand_aliases  # bash requires this\nx=x\nalias ec...echo-x hi", assertions=[Assertion(type='stdout', value='y hi', shells=None, variant=None)], line_number=199, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Expansion of alias with variable (line 199)
E           
E           stdout mismatch:
E             expected: 'y hi'
E             actual:   '$x hi'
E           
E           Expected stdout: 'y hi'
E           Actual stdout:   '$x hi\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           x=x
E           alias echo-x='echo $x'  # nothing is evaluated here
E           x=y
E           echo-x hi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Alias must be an unquoted word, no expansions allowed[L209]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042510>
test_file = 'alias.test.sh'
test_case = TestCase(name='Alias must be an unquoted word, no expansions allowed', script="shopt -s expand_aliases  # bash require...", assertions=[Assertion(type='stdout', value='X\nstatus=127', shells=None, variant=None)], line_number=209, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Alias must be an unquoted word, no expansions allowed (line 209)
E           
E           stdout mismatch:
E             expected: 'X\nstatus=127'
E             actual:   'X\nX\nstatus=0'
E           
E           Expected stdout: 'X\nstatus=127'
E           Actual stdout:   'X\nX\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           alias echo_alias_='echo'
E           cmd=echo_alias_
E           echo_alias_ X  # this works
E           $cmd X  # this fails because it's quoted
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::first and second word are the same alias, but no trailing space[L221]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090425d0>
test_file = 'alias.test.sh'
test_case = TestCase(name='first and second word are the same alias, but no trailing space', script="shopt -s expand_aliases  # ba...cho-x", assertions=[Assertion(type='stdout', value='x echo-x', shells=None, variant=None)], line_number=221, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: first and second word are the same alias, but no trailing space (line 221)
E           
E           stdout mismatch:
E             expected: 'x echo-x'
E             actual:   '$x echo-x'
E           
E           Expected stdout: 'x echo-x'
E           Actual stdout:   '$x echo-x\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           x=x
E           alias echo-x='echo $x'  # nothing is evaluated here
E           echo-x echo-x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::first and second word are the same alias, with trailing space[L230]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042690>
test_file = 'alias.test.sh'
test_case = TestCase(name='first and second word are the same alias, with trailing space', script="shopt -s expand_aliases  # bash...cho-x", assertions=[Assertion(type='stdout', value='x echo x', shells=None, variant=None)], line_number=230, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: first and second word are the same alias, with trailing space (line 230)
E           
E           stdout mismatch:
E             expected: 'x echo x'
E             actual:   '$x echo $x echo-x'
E           
E           Expected stdout: 'x echo x'
E           Actual stdout:   '$x echo $x echo-x\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           x=x
E           alias echo-x='echo $x '  # nothing is evaluated here
E           echo-x echo-x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Invalid syntax of alias[L240]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042750>
test_file = 'alias.test.sh'
test_case = TestCase(name='Invalid syntax of alias', script="shopt -s expand_aliases  # bash requires this\nalias echo_alias_= 'ec...cho_alias_ x", assertions=[Assertion(type='status', value=127, shells=None, variant=None)], line_number=240, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Invalid syntax of alias (line 240)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: None
E           Actual stdout:   'bash: alias: echo --; echo: not found\n'
E           Expected stderr: None
E           Actual stderr:   'bash: echo_alias_: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           alias echo_alias_= 'echo --; echo'  # bad space here
E           echo_alias_ x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Syntax error after expansion[L263]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042990>
test_file = 'alias.test.sh'
test_case = TestCase(name='Syntax error after expansion', script="shopt -s expand_aliases  # bash requires this\nalias e_=';; oops...e, variant=None), Assertion(type='status', value=1, shells=['mksh', 'zsh'], variant='OK')], line_number=263, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Syntax error after expansion (line 263)
E           
E           status mismatch: expected 2, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: ;;: command not found\n'
E           Expected status: 2
E           Actual status:   1
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           alias e_=';; oops'
E           e_ x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Loop split across alias and arg works[L270]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042a50>
test_file = 'alias.test.sh'
test_case = TestCase(name='Loop split across alias and arg works', script="shopt -s expand_aliases  # bash requires this\nalias e_..._ done", assertions=[Assertion(type='stdout', value='1\n2\n3', shells=None, variant=None)], line_number=270, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Loop split across alias and arg works (line 270)
E           
E           stdout mismatch:
E             expected: '1\n2\n3'
E             actual:   ''
E           
E           Expected stdout: '1\n2\n3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: for: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           alias e_='for i in 1 2 3; do echo $i;'
E           e_ done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Loop split across alias in another way[L280]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042b10>
test_file = 'alias.test.sh'
test_case = TestCase(name='Loop split across alias in another way', script="shopt -s expand_aliases\nalias e_='for i in 1 2 3; do ...s=['osh'], variant='OK'), Assertion(type='status', value=2, shells=['osh'], variant='OK')], line_number=280, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Loop split across alias in another way (line 280)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 3, column 8
E           
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias e_='for i in 1 2 3; do echo '
E           e_ $i; done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Loop split across both iterative and recursive aliases[L293]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042c00>
test_file = 'alias.test.sh'
test_case = TestCase(name='Loop split across both iterative and recursive aliases', script='shopt -s expand_aliases  # bash requir...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='BUG')], line_number=293, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Loop split across both iterative and recursive aliases (line 293)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 9, column 24
E           
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           alias FOR1='for '
E           alias FOR2='FOR1 '
E           alias eye1='i '
E           alias eye2='eye1 '
E           alias IN='in '
E           alias onetwo='$one "2" '  # NOTE: this does NOT work in any shell except bash.
E           one=1
E           FOR2 eye2 IN onetwo 3; do echo $i; done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Alias with a quote in the middle is a syntax error[L313]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042cf0>
test_file = 'alias.test.sh'
test_case = TestCase(name='Alias with a quote in the middle is a syntax error', script='shopt -s expand_aliases\nalias e_=\'echo "...e, variant=None), Assertion(type='status', value=1, shells=['mksh', 'zsh'], variant='OK')], line_number=313, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Alias with a quote in the middle is a syntax error (line 313)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: None
E           Actual stdout:   '" ${var}"\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias e_='echo "'
E           var=x
E           e_ '${var}"'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Alias with internal newlines[L322]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042db0>
test_file = 'alias.test.sh'
test_case = TestCase(name='Alias with internal newlines', script="shopt -s expand_aliases\nalias e_='echo 1\necho 2\necho 3'\nvar=...assertions=[Assertion(type='stdout', value='1\n2\n3 echo foo', shells=None, variant=None)], line_number=322, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Alias with internal newlines (line 322)
E           
E           stdout mismatch:
E             expected: '1\n2\n3 echo foo'
E             actual:   '1 echo 2 echo 3 echo foo'
E           
E           Expected stdout: '1\n2\n3 echo foo'
E           Actual stdout:   '1 echo 2 echo 3 echo foo\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias e_='echo 1
E           echo 2
E           echo 3'
E           var='echo foo'
E           e_ ${var}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Alias trailing newline[L335]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042e70>
test_file = 'alias.test.sh'
test_case = TestCase(name='Alias trailing newline', script="shopt -s expand_aliases\nalias e_='echo 1\necho 2\necho 3\n'\nvar='ech...['zsh'], variant='OK'), Assertion(type='status', value=127, shells=['zsh'], variant='OK')], line_number=335, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Alias trailing newline (line 335)
E           
E           stdout mismatch:
E             expected: '1\n2\n3\nfoo'
E             actual:   '1 echo 2 echo 3 echo foo'
E           
E           Expected stdout: '1\n2\n3\nfoo'
E           Actual stdout:   '1 echo 2 echo 3 echo foo\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias e_='echo 1
E           echo 2
E           echo 3
E           '
E           var='echo foo'
E           e_ ${var}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Two aliases in pipeline[L357]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042f30>
test_file = 'alias.test.sh'
test_case = TestCase(name='Two aliases in pipeline', script="shopt -s expand_aliases\nalias SEQ='seq '\nalias THREE='3 '\nalias WC...HREE | WC -l", assertions=[Assertion(type='stdout', value='3', shells=None, variant=None)], line_number=357, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Two aliases in pipeline (line 357)
E           
E           stdout mismatch:
E             expected: '3'
E             actual:   '0'
E           
E           Expected stdout: '3'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias SEQ='seq '
E           alias THREE='3 '
E           alias WC='wc '
E           SEQ THREE | WC -l
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Alias not respected inside $()[L366]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109042ff0>
test_file = 'alias.test.sh'
test_case = TestCase(name='Alias not respected inside $()', script="# This could be parsed correctly, but it is only defined in a ...llo')\nsayhi", assertions=[Assertion(type='status', value=127, shells=None, variant=None)], line_number=366, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Alias not respected inside $() (line 366)
E           
E           status mismatch: expected 127, got 0
E           
E           Expected stdout: None
E           Actual stdout:   '\nhello\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 127
E           Actual status:   0
E           
E           Script:
E           ---
E           # This could be parsed correctly, but it is only defined in a child process.
E           shopt -s expand_aliases
E           echo $(alias sayhi='echo hello')
E           sayhi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Alias can be defined and used on a single line[L373]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090430b0>
test_file = 'alias.test.sh'
test_case = TestCase(name='Alias can be defined and used on a single line', script="shopt -s expand_aliases\nalias sayhi='echo hel...assertions=[Assertion(type='stdout', value='hello other line', shells=None, variant=None)], line_number=373, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Alias can be defined and used on a single line (line 373)
E           
E           stdout mismatch:
E             expected: 'hello other line'
E             actual:   'hello same line\nhello other line'
E           
E           Expected stdout: 'hello other line'
E           Actual stdout:   'hello same line\nhello other line\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias sayhi='echo hello'; sayhi same line
E           sayhi other line
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::alias with line continuation in the middle[L418]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090433b0>
test_file = 'alias.test.sh'
test_case = TestCase(name='alias with line continuation in the middle', script="shopt -s expand_aliases\nalias e_='echo '\nalias o...ssertion(type='stdout', value='ONE TWO ONE TWO THREE two one', shells=None, variant=None)], line_number=418, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: alias with line continuation in the middle (line 418)
E           
E           stdout mismatch:
E             expected: 'ONE TWO ONE TWO THREE two one'
E             actual:   'ONE one two one two three two one'
E           
E           Expected stdout: 'ONE TWO ONE TWO THREE two one'
E           Actual stdout:   'ONE one two one two three two one\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias e_='echo '
E           alias one='ONE '
E           alias two='TWO '
E           alias three='THREE'  # no trailing space
E           e_ one \
E             two one \
E             two three two \
E             one
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::alias for left brace[L431]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109043470>
test_file = 'alias.test.sh'
test_case = TestCase(name='alias for left brace', script="shopt -s expand_aliases\nalias LEFT='{'\nLEFT echo one; echo two; }", as...s=['osh'], variant='OK'), Assertion(type='status', value=2, shells=['osh'], variant='OK')], line_number=431, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: alias for left brace (line 431)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 3, column 26
E           
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias LEFT='{'
E           LEFT echo one; echo two; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::alias for left paren[L443]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109043530>
test_file = 'alias.test.sh'
test_case = TestCase(name='alias for left paren', script="shopt -s expand_aliases\nalias LEFT='('\nLEFT echo one; echo two )", ass...s=['osh'], variant='OK'), Assertion(type='status', value=2, shells=['osh'], variant='OK')], line_number=443, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: alias for left paren (line 443)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 3, column 25
E           
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias LEFT='('
E           LEFT echo one; echo two )
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::alias used in subshell and command sub[L455]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090435f0>
test_file = 'alias.test.sh'
test_case = TestCase(name='alias used in subshell and command sub', script='# This spec seems to be contradictoary?\n# http://pubs...ns=[Assertion(type='stdout', value='[ subshell\n[ commandsub', shells=None, variant=None)], line_number=455, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: alias used in subshell and command sub (line 455)
E           
E           stdout mismatch:
E             expected: '[ subshell\n[ commandsub'
E             actual:   '[ commandsub'
E           
E           Expected stdout: '[ subshell\n[ commandsub'
E           Actual stdout:   '[ commandsub\n'
E           Expected stderr: None
E           Actual stderr:   'bash: echo_: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This spec seems to be contradictoary?
E           # http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_03_01
E           # "When used as specified by this volume of POSIX.1-2017, alias definitions
E           # shall not be inherited by separate invocations of the shell or by the utility
E           # execution environments invoked by the shell; see Shell Execution
E           # Environment."
E           shopt -s expand_aliases
E           alias echo_='echo [ '
E           ( echo_ subshell; )
E           echo $(echo_ commandsub)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::here doc inside alias[L481]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109043770>
test_file = 'alias.test.sh'
test_case = TestCase(name='here doc inside alias', script="shopt -s expand_aliases\nalias c='cat <<EOF\n$(echo hi)\nEOF\n'\nc", as...sh'], variant='BUG'), Assertion(type='status', value=127, shells=['bash'], variant='BUG')], line_number=481, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: here doc inside alias (line 481)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'cat: <<EOF: No such file or directory\ncat: $(echo: No such file or directory\ncat: hi): No such file or directory\ncat: EOF: No such file or directory\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias c='cat <<EOF
E           $(echo hi)
E           EOF
E           '
E           c
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Corner case: alias inside LHS array arithmetic expression[L494]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109043830>
test_file = 'alias.test.sh'
test_case = TestCase(name='Corner case: alias inside LHS array arithmetic expression', script='shopt -s expand_aliases\nalias zero...['zsh'], variant='N-I'), Assertion(type='status', value=1, shells=['zsh'], variant='N-I')], line_number=494, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Corner case: alias inside LHS array arithmetic expression (line 494)
E           
E           stdout mismatch:
E             expected: "['ZERO', 'ONE']"
E             actual:   "['ONE']"
E           
E           Expected stdout: "['ZERO', 'ONE']"
E           Actual stdout:   "['ONE']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias zero='echo 0'
E           a[$(zero)]=ZERO
E           a[1]=ONE
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Alias that is pipeline[L508]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090438f0>
test_file = 'alias.test.sh'
test_case = TestCase(name='Alias that is pipeline', script="shopt -s expand_aliases\nalias t1='echo hi|wc -c'\nt1", assertions=[Assertion(type='stdout', value='3', shells=None, variant=None)], line_number=508, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Alias that is pipeline (line 508)
E           
E           stdout mismatch:
E             expected: '3'
E             actual:   'hi|wc -c'
E           
E           Expected stdout: '3'
E           Actual stdout:   'hi|wc -c\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias t1='echo hi|wc -c'
E           t1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[alias.test.sh::Alias that is && || ;[L516]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090439b0>
test_file = 'alias.test.sh'
test_case = TestCase(name='Alias that is && || ;', script="shopt -s expand_aliases\nalias t1='echo one && echo two && echo 3 | wc ...ssertions=[Assertion(type='stdout', value='one\ntwo\n1\nfour', shells=None, variant=None)], line_number=516, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Alias that is && || ; (line 516)
E           
E           stdout mismatch:
E             expected: 'one\ntwo\n1\nfour'
E             actual:   'one && echo two && echo 3 | wc -l; echo four'
E           
E           Expected stdout: 'one\ntwo\n1\nfour'
E           Actual stdout:   'one && echo two && echo 3 | wc -l; echo four\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias t1='echo one && echo two && echo 3 | wc -l;
E           echo four'
E           t1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[append.test.sh::error: s+=(my array)[L45]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080170>
test_file = 'append.test.sh'
test_case = TestCase(name='error: s+=(my array)', script='s=\'abc\'\ns+=(d e f)\nargv.py "${s[@]}"', assertions=[Assertion(type='s...None), Assertion(type='stdout', value="['abc', 'd', 'e', 'f']", shells=None, variant=None)], line_number=45, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: error: s+=(my array) (line 45)
E           
E           stdout mismatch:
E             expected: "['abc', 'd', 'e', 'f']"
E             actual:   "['d', 'e', 'f']"
E           
E           Expected stdout: "['abc', 'd', 'e', 'f']"
E           Actual stdout:   "['d', 'e', 'f']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           s='abc'
E           s+=(d e f)
E           argv.py "${s[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[append.test.sh::error: myarray+=s[L54]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080230>
test_file = 'append.test.sh'
test_case = TestCase(name='error: myarray+=s', script='# They treat this as implicit index 0.  We disallow this on the LHS, so we ...ant='OK'), Assertion(type='stdout', value="['x', 'y', 'z']", shells=['zsh'], variant='OK')], line_number=54, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: error: myarray+=s (line 54)
E           
E           stdout mismatch:
E             expected: "['xz', 'y']"
E             actual:   "['x', 'y']"
E           
E           Expected stdout: "['xz', 'y']"
E           Actual stdout:   "['x', 'y']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # They treat this as implicit index 0.  We disallow this on the LHS, so we will
E           # also disallow it on the RHS.
E           a=(x y )
E           a+=z
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[append.test.sh::typeset s+=(my array)[L68]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090802f0>
test_file = 'append.test.sh'
test_case = TestCase(name='typeset s+=(my array)', script='typeset s=\'abc\'\necho $s\n\ntypeset s+=(d e f)\necho status=$?\nargv....e='stderr', value='typeset: not valid in this context: s+', shells=['zsh'], variant='N-I')], line_number=68, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: typeset s+=(my array) (line 68)
E           
E           stdout mismatch:
E             expected: "abc\nstatus=0\n['abc', 'd', 'e', 'f']"
E             actual:   'abc\nstatus=1\n[]'
E           
E           Expected stdout: "abc\nstatus=0\n['abc', 'd', 'e', 'f']"
E           Actual stdout:   'abc\nstatus=1\n[]\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `s+=(d e f)': not a valid identifier\n"
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset s='abc'
E           echo $s
E           
E           typeset s+=(d e f)
E           echo status=$?
E           argv.py "${s[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[append.test.sh::error: typeset myarray+=s[L87]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090803b0>
test_file = 'append.test.sh'
test_case = TestCase(name='error: typeset myarray+=s', script='typeset a=(x y)\nargv.py "${a[@]}"\ntypeset a+=s\nargv.py "${a[@]}"...bash'], variant='BUG'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=87, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: error: typeset myarray+=s (line 87)
E           
E           stdout mismatch:
E             expected: "['x', 'y']\n['xs', 'y']"
E             actual:   '[]\n[]'
E           
E           Expected stdout: "['x', 'y']\n['xs', 'y']"
E           Actual stdout:   '[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `a+=s': not a valid identifier\n"
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset a=(x y)
E           argv.py "${a[@]}"
E           typeset a+=s
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[append.test.sh::error: append used like env prefix[L105]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080470>
test_file = 'append.test.sh'
test_case = TestCase(name='error: append used like env prefix', script="# This should be an error in other shells but it's not.\nA...sh'], variant='BUG'), Assertion(type='stdout', value='a', shells=['mksh'], variant='BUG')], line_number=105, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: error: append used like env prefix (line 105)
E           
E           stdout mismatch:
E             expected: 'aa'
E             actual:   'a'
E           
E           Expected stdout: 'aa'
E           Actual stdout:   'a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # This should be an error in other shells but it's not.
E           A=a
E           A+=a printenv.py A
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[append.test.sh::myarray[-1]+=s - Append to last element[L125]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090805f0>
test_file = 'append.test.sh'
test_case = TestCase(name='myarray[-1]+=s - Append to last element', script='# Works in bash, but not mksh.  It seems like bash is...ne), Assertion(type='stdout', value="['1', '2 3', ' 4']", shells=['mksh'], variant='BUG')], line_number=125, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: myarray[-1]+=s - Append to last element (line 125)
E           
E           stdout mismatch:
E             expected: "['1', '2 3 4']"
E             actual:   "[' 4', '1', '2 3']"
E           
E           Expected stdout: "['1', '2 3 4']"
E           Actual stdout:   "[' 4', '1', '2 3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Works in bash, but not mksh.  It seems like bash is doing the right thing.
E           # a[-1] is allowed on the LHS.  mksh doesn't have negative indexing?
E           a=(1 '2 3')
E           a[-1]+=' 4'
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[append.test.sh::typeset s+=[L164]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080830>
test_file = 'append.test.sh'
test_case = TestCase(name='typeset s+=', script="typeset s+=foo\necho s=$s\n\n# bash and mksh agree that this does NOT respect set...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='N-I')], line_number=164, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: typeset s+= (line 164)
E           
E           stdout mismatch:
E             expected: 's=foo\nt=foo\nt=foofoo'
E             actual:   's='
E           
E           Expected stdout: 's=foo\nt=foo\nt=foofoo'
E           Actual stdout:   's=\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `s+=foo': not a valid identifier\nbash: declare: `t+=foo': not a valid identifier\nbash: t: unbound variable\n"
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           typeset s+=foo
E           echo s=$s
E           
E           # bash and mksh agree that this does NOT respect set -u.
E           # I think that's a mistake, but += is a legacy construct, so let's copy it.
E           
E           set -u
E           
E           typeset t+=foo
E           echo t=$t
E           typeset t+=foo
E           echo t=$t
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[append.test.sh::typeset s${dyn}+=[L186]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090808f0>
test_file = 'append.test.sh'
test_case = TestCase(name='typeset s${dyn}+=', script="dyn=x\n\ntypeset s${dyn}+=foo\necho sx=$sx\n\n# bash and mksh agree that th...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='N-I')], line_number=186, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: typeset s${dyn}+= (line 186)
E           
E           stdout mismatch:
E             expected: 'sx=foo\ntx=foo\ntx=foofoo'
E             actual:   'sx='
E           
E           Expected stdout: 'sx=foo\ntx=foo\ntx=foofoo'
E           Actual stdout:   'sx=\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `sx+=foo': not a valid identifier\nbash: declare: `tx+=foo': not a valid identifier\nbash: tx: unbound variable\n"
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           dyn=x
E           
E           typeset s${dyn}+=foo
E           echo sx=$sx
E           
E           # bash and mksh agree that this does NOT respect set -u.
E           # I think that's a mistake, but += is a legacy construct, so let's copy it.
E           
E           set -u
E           
E           typeset t${dyn}+=foo
E           echo tx=$tx
E           typeset t${dyn}+=foo
E           echo tx=$tx
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[append.test.sh::export readonly +=[L210]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090809b0>
test_file = 'append.test.sh'
test_case = TestCase(name='export readonly +=', script='export e+=foo\necho e=$e\n\nreadonly r+=bar\necho r=$r\n\nset -u\n\nexport...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='N-I')], line_number=210, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: export readonly += (line 210)
E           
E           stdout mismatch:
E             expected: 'e=foo\nr=bar\ne=foofoo'
E             actual:   'e=\nr='
E           
E           Expected stdout: 'e=foo\nr=bar\ne=foofoo'
E           Actual stdout:   'e=\nr=\n'
E           Expected stderr: None
E           Actual stderr:   "bash: export: 'e+': not a valid identifier\nbash: export: 'e+': not a valid identifier\nbash: e: unbound variable\n"
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           export e+=foo
E           echo e=$e
E           
E           readonly r+=bar
E           echo r=$r
E           
E           set -u
E           
E           export e+=foo
E           echo e=$e
E           
E           #readonly r+=foo
E           #echo r=$e
E           ---

tests/spec_tests/test_spec.py:218: Failed
_______ TestBashSpecTests.test_spec_case[append.test.sh::local +=[L234]] _______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080a70>
test_file = 'append.test.sh'
test_case = TestCase(name='local +=', script='f() {\n  local s+=foo\n  echo s=$s\n\n  set -u\n  local s+=foo\n  echo s=$s\n}\n\nf'...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='N-I')], line_number=234, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: local += (line 234)
E           
E           stdout mismatch:
E             expected: 's=foo\ns=foofoo'
E             actual:   ''
E           
E           Expected stdout: 's=foo\ns=foofoo'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: s: unbound variable\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           f() {
E             local s+=foo
E             echo s=$s
E           
E             set -u
E             local s+=foo
E             echo s=$s
E           }
E           
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[append.test.sh::assign builtin appending array: declare d+=(d e)[L253]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080b30>
test_file = 'append.test.sh'
test_case = TestCase(name='assign builtin appending array: declare d+=(d e)', script='declare d+=(d e)\necho "${d[@]}"\ndeclare d+...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='N-I')], line_number=253, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assign builtin appending array: declare d+=(d e) (line 253)
E           
E           stdout mismatch:
E             expected: 'd e\nd e c l\nr e\nl o\nl o c a'
E             actual:   ''
E           
E           Expected stdout: 'd e\nd e c l\nr e\nl o\nl o c a'
E           Actual stdout:   '\n\n\n\n\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `d+=(d e)': not a valid identifier\nbash: declare: `d+=(c l)': not a valid identifier\nbash: local: 'l+': not a valid identifier\nbash: local: 'l+': not a valid identifier\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare d+=(d e)
E           echo "${d[@]}"
E           declare d+=(c l)
E           echo "${d[@]}"
E           
E           readonly r+=(r e)
E           echo "${r[@]}"
E           # can't do this again
E           
E           f() {
E             local l+=(l o)
E             echo "${l[@]}"
E           
E             local l+=(c a)
E             echo "${l[@]}"
E           }
E           
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[arg-parse.test.sh::shift 1 extra[L13]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080e30>
test_file = 'arg-parse.test.sh'
test_case = TestCase(name='shift 1 extra', script="$SH -c '\nset -- a b c\nshift 1 extra\n'\nif test $? -eq 0; then\n  echo fail\n...sertion(type='stdout', value='fail', shells=['dash', 'mksh', 'zsh', 'ash'], variant='BUG')], line_number=13, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shift 1 extra (line 13)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'fail'
E           
E           Expected stdout: ''
E           Actual stdout:   'fail\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c '
E           set -- a b c
E           shift 1 extra
E           '
E           if test $? -eq 0; then
E             echo fail
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arg-parse.test.sh::continue 1 extra, break, etc.[L29]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080ef0>
test_file = 'arg-parse.test.sh'
test_case = TestCase(name='continue 1 extra, break, etc.', script="$SH -c '\nfor i in foo; do\n  continue 1 extra\ndone\necho stat...nt='BUG'), Assertion(type='stdout', value='status=1\nfail', shells=['zsh'], variant='BUG')], line_number=29, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: continue 1 extra, break, etc. (line 29)
E           
E           Execution error: Expected 'done' to close for loop at line 1, column 17
E           
E           
E           Script:
E           ---
E           $SH -c '
E           for i in foo; do
E             continue 1 extra
E           done
E           echo status=$?
E           '
E           if test $? -eq 0; then
E             echo fail
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Multiple right brackets inside expression[L12]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109080fb0>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Multiple right brackets inside expression', script='a=(1 2 3)\necho ${a[a[0]]} ${a[a[a[0]]]}', assertio...['zsh'], variant='N-I'), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=12, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple right brackets inside expression (line 12)
E           
E           stdout mismatch:
E             expected: '2 3'
E             actual:   '1 1'
E           
E           Expected stdout: '2 3'
E           Actual stdout:   '1 1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           echo ${a[a[0]]} ${a[a[a[0]]]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Slicing of string with variables[L31]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081130>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Slicing of string with variables', script="s='abcd'\nzero=0\none=1\necho ${s:$zero} ${s:$zero:4} ${s:$o...ne}", assertions=[Assertion(type='stdout', value='abcd abcd b', shells=None, variant=None)], line_number=31, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Slicing of string with variables (line 31)
E           
E           stdout mismatch:
E             expected: 'abcd abcd b'
E             actual:   ''
E           
E           Expected stdout: 'abcd abcd b'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s='abcd'
E           zero=0
E           one=1
E           echo ${s:$zero} ${s:$zero:4} ${s:$one:$one}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Array index on LHS of assignment[L38]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090811f0>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Array index on LHS of assignment', script='a=(1 2 3)\nzero=0\na[zero+5-4]=X\necho ${a[@]}', assertions=...None, variant=None), Assertion(type='stdout', value='X 2 3', shells=['zsh'], variant='OK')], line_number=38, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Array index on LHS of assignment (line 38)
E           
E           stdout mismatch:
E             expected: '1 X 3'
E             actual:   '1 2 3'
E           
E           Expected stdout: '1 X 3'
E           Actual stdout:   '1 2 3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           zero=0
E           a[zero+5-4]=X
E           echo ${a[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Array index on LHS with indices[L46]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090812b0>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Array index on LHS with indices', script='a=(1 2 3)\na[a[1]]=X\necho ${a[@]}', assertions=[Assertion(ty...None, variant=None), Assertion(type='stdout', value='X 2 3', shells=['zsh'], variant='OK')], line_number=46, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Array index on LHS with indices (line 46)
E           
E           stdout mismatch:
E             expected: '1 2 X'
E             actual:   '1 2 3'
E           
E           Expected stdout: '1 2 X'
E           Actual stdout:   '1 2 3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           a[a[1]]=X
E           echo ${a[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Slicing of string with expressions[L53]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081370>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Slicing of string with expressions', script="# mksh accepts ${s:0} and ${s:$zero} but not ${s:zero}\n# ...=['zsh'], variant='BUG'), Assertion(type='status', value=1, shells=['zsh'], variant='BUG')], line_number=53, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Slicing of string with expressions (line 53)
E           
E           stdout mismatch:
E             expected: 'abcd abcd b'
E             actual:   ''
E           
E           Expected stdout: 'abcd abcd b'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh accepts ${s:0} and ${s:$zero} but not ${s:zero}
E           # zsh says unrecognized modifier 'z'
E           s='abcd'
E           zero=0
E           echo ${s:zero} ${s:zero+0} ${s:zero+1:zero+1}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Ambiguous colon in slice[L65]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081430>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Ambiguous colon in slice', script="s='abcd'\necho $(( 0 < 1 ? 2 : 0 ))  # evaluates to 2\necho ${s: 0 <..., variant='BUG'), Assertion(type='status', value=1, shells=['mksh', 'zsh'], variant='BUG')], line_number=65, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ambiguous colon in slice (line 65)
E           
E           stdout mismatch:
E             expected: '2\nc'
E             actual:   '2'
E           
E           Expected stdout: '2\nc'
E           Actual stdout:   '2\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s='abcd'
E           echo $(( 0 < 1 ? 2 : 0 ))  # evaluates to 2
E           echo ${s: 0 < 1 ? 2 : 0 : 1}  # 2:1 -- TRICKY THREE COLONS
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Triple parens should be disambiguated[L78]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090814f0>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Triple parens should be disambiguated', script='# The first paren is part of the math, parens 2 and 3 a...1 + (2*3)))', assertions=[Assertion(type='stdout', value='7 7', shells=None, variant=None)], line_number=78, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Triple parens should be disambiguated (line 78)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 3, column 15
E           
E           
E           Script:
E           ---
E           # The first paren is part of the math, parens 2 and 3 are a single token ending
E           # arith sub.
E           ((a=1 + (2*3)))
E           echo $a $((1 + (2*3)))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Quadruple parens should be disambiguated[L87]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090815b0>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Quadruple parens should be disambiguated', script='((a=1 + (2 * (3+4))))\necho $a $((1 + (2 * (3+4))))', assertions=[Assertion(type='stdout', value='15 15', shells=None, variant=None)], line_number=87, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quadruple parens should be disambiguated (line 87)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 20
E           
E           
E           Script:
E           ---
E           ((a=1 + (2 * (3+4))))
E           echo $a $((1 + (2 * (3+4))))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Empty expression (( ))  $(( ))[L133]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090818b0>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Empty expression (( ))  $(( ))', script='(( ))\necho status=$?\n\necho $(( ))\n\n#echo $[]', assertions=[Assertion(type='stdout', value='status=1\n0', shells=None, variant=None)], line_number=133, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty expression (( ))  $(( )) (line 133)
E           
E           stdout mismatch:
E             expected: 'status=1\n0'
E             actual:   'status=0\n0'
E           
E           Expected stdout: 'status=1\n0'
E           Actual stdout:   'status=0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           (( ))
E           echo status=$?
E           
E           echo $(( ))
E           
E           #echo $[]
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Empty expression in ${a[@]: : }[L162]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081a30>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Empty expression in ${a[@]: : }', script="a=(a b c d e f)\n\n# space required here -- see spec/var-op-s...ksh'], variant='BUG'), Assertion(type='stdout', value='', shells=['mksh'], variant='BUG')], line_number=162, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty expression in ${a[@]: : } (line 162)
E           
E           stdout mismatch:
E             expected: 'slice a b c d e f\nstatus=0\n\nslice\nstatus=0\n\nslice\nstatus=0'
E             actual:   'slice\nstatus=0\n\nslice\nstatus=0\n\nslice\nstatus=0'
E           
E           Expected stdout: 'slice a b c d e f\nstatus=0\n\nslice\nstatus=0\n\nslice\nstatus=0'
E           Actual stdout:   'slice\nstatus=0\n\nslice\nstatus=0\n\nslice\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(a b c d e f)
E           
E           # space required here -- see spec/var-op-slice
E           echo slice ${a[@]: }
E           echo status=$?
E           echo
E           
E           echo slice ${a[@]: : }
E           echo status=$?
E           echo
E           
E           # zsh doesn't accept this
E           echo slice ${a[@]:: }
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-context.test.sh::Empty expression a[][L205]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081af0>
test_file = 'arith-context.test.sh'
test_case = TestCase(name='Empty expression a[]', script='a=(1 2 3)\n\na[]=42\necho status=$?\necho ${a[@]}\n\necho ${a[]}\necho s...on(type='stdout', value='status=0\n42 2 3\n42\nstatus=0', shells=['mksh'], variant='BUG')], line_number=205, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty expression a[] (line 205)
E           
E           stdout mismatch:
E             expected: 'status=1\n1 2 3\nstatus=1'
E             actual:   'status=0\n1 2 3\n42\nstatus=0'
E           
E           Expected stdout: 'status=1\n1 2 3\nstatus=1'
E           Actual stdout:   'status=0\n1 2 3\n42\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           
E           a[]=42
E           echo status=$?
E           echo ${a[@]}
E           
E           echo ${a[]}
E           echo status=$?
E           
E           
E           
E           # runtime failures
E           
E           
E           
E           
E           # Others 
E           # [ 1+2 -eq 3 ]
E           # [[ 1+2 -eq 3 ]]
E           # unset a[]
E           # printf -v a[]
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[arith-dynamic.test.sh::Double quotes[L6]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081bb0>
test_file = 'arith-dynamic.test.sh'
test_case = TestCase(name='Double quotes', script='echo $(( "1 + 2" * 3 ))\necho $(( "1+2" * 3 ))', assertions=[Assertion(type='st...=['zsh'], variant='N-I'), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=6, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Double quotes (line 6)
E           
E           stdout mismatch:
E             expected: '7\n7'
E             actual:   '0\n0'
E           
E           Expected stdout: '7\n7'
E           Actual stdout:   '0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $(( "1 + 2" * 3 ))
E           echo $(( "1+2" * 3 ))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-dynamic.test.sh::Single quotes[L26]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081c70>
test_file = 'arith-dynamic.test.sh'
test_case = TestCase(name='Single quotes', script="echo $(( '1' + '2' * 3 ))\necho status=$?\n\necho $(( '1 + 2' * 3 ))\necho stat...['zsh'], variant='N-I'), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=26, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Single quotes (line 26)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   '0\nstatus=0\n0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   '0\nstatus=0\n0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $(( '1' + '2' * 3 ))
E           echo status=$?
E           
E           echo $(( '1 + 2' * 3 ))
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-dynamic.test.sh::Substitutions[L51]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081d30>
test_file = 'arith-dynamic.test.sh'
test_case = TestCase(name='Substitutions', script='x=\'1 + 2\'\necho $(( $x * 3 ))\necho $(( "$x" * 3 ))', assertions=[Assertion(t...'zsh'], variant='N-I'), Assertion(type='stdout', value='7', shells=['zsh'], variant='N-I')], line_number=51, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Substitutions (line 51)
E           
E           stdout mismatch:
E             expected: '7\n7'
E             actual:   '0\n0'
E           
E           Expected stdout: '7\n7'
E           Actual stdout:   '0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='1 + 2'
E           echo $(( $x * 3 ))
E           echo $(( "$x" * 3 ))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith-dynamic.test.sh::Variable references[L75]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081df0>
test_file = 'arith-dynamic.test.sh'
test_case = TestCase(name='Variable references', script="x='1'\necho $(( x + 2 * 3 ))\necho status=$?\n\n# Expression like values ...iant='N-I'), Assertion(type='stdout', value='7\nstatus=0', shells=['dash'], variant='N-I')], line_number=75, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Variable references (line 75)
E           
E           stdout mismatch:
E             expected: '7\nstatus=0\n9\nstatus=0'
E             actual:   '7\nstatus=0\n0\nstatus=0'
E           
E           Expected stdout: '7\nstatus=0\n9\nstatus=0'
E           Actual stdout:   '7\nstatus=0\n0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='1'
E           echo $(( x + 2 * 3 ))
E           echo status=$?
E           
E           # Expression like values are evaluated first (this is unlike double quotes)
E           x='1 + 2'
E           echo $(( x * 3 ))
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Side Effect in Array Indexing[L17]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109081eb0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Side Effect in Array Indexing', script='a=(4 5 6)\necho "${a[b=2]} b=$b"', assertions=[Assertion(type='...'dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=17, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Side Effect in Array Indexing (line 17)
E           
E           stdout mismatch:
E             expected: '6 b=2'
E             actual:   '4 b='
E           
E           Expected stdout: '6 b=2'
E           Actual stdout:   '4 b=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(4 5 6)
E           echo "${a[b=2]} b=$b"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Arith sub with word parts[L48]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109082330>
test_file = 'arith.test.sh'
test_case = TestCase(name='Arith sub with word parts', script='# Making 13 from two different kinds of sub.  Geez.\necho $((1 + $(...efined:-3}))', assertions=[Assertion(type='stdout', value='14', shells=None, variant=None)], line_number=48, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Arith sub with word parts (line 48)
E           
E           stdout mismatch:
E             expected: '14'
E             actual:   '1'
E           
E           Expected stdout: '14'
E           Actual stdout:   '1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Making 13 from two different kinds of sub.  Geez.
E           echo $((1 + $(echo 1)${undefined:-3}))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Backticks within arith sub[L66]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109082570>
test_file = 'arith.test.sh'
test_case = TestCase(name='Backticks within arith sub', script='# This is unnecessary but works in all shells.\necho $((`echo 1` + 2))', assertions=[Assertion(type='stdout', value='3', shells=None, variant=None)], line_number=66, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Backticks within arith sub (line 66)
E           
E           stdout mismatch:
E             expected: '3'
E             actual:   '2'
E           
E           Expected stdout: '3'
E           Actual stdout:   '2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is unnecessary but works in all shells.
E           echo $((`echo 1` + 2))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Integer constant validation[L137]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109082870>
test_file = 'arith.test.sh'
test_case = TestCase(name='Integer constant validation', script='check() {\n  $SH -c "shopt --set strict_arith; echo $1"\n  echo s...t', value='status=1\n9\nstatus=0\nstatus=1\n6\nstatus=0', shells=['mksh'], variant='BUG')], line_number=137, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Integer constant validation (line 137)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1\nstatus=1\nstatus=1'
E             actual:   '0\nstatus=0\n9\nstatus=0\n0\nstatus=0\n6\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1\nstatus=1\nstatus=1'
E           Actual stdout:   '0\nstatus=0\n9\nstatus=0\n0\nstatus=0\n6\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: shopt: --: invalid option\nbash: shopt: --: invalid option\nbash: shopt: --: invalid option\nbash: shopt: --: invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           check() {
E             $SH -c "shopt --set strict_arith; echo $1"
E             echo status=$?
E           }
E           
E           check '$(( 0x1X ))'
E           check '$(( 09 ))'
E           check '$(( 2#A ))'
E           check '$(( 02#0110 ))'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Increment undefined variables with nounset[L235]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109082db0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Increment undefined variables with nounset', script='set -o nounset\n(( undef1++ ))\n(( ++undef2 ))\nec...t='BUG'), Assertion(type='stdout', value='[1][1]', shells=['mksh', 'zsh'], variant='BUG')], line_number=235, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Increment undefined variables with nounset (line 235)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '[1][1]'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '[1][1]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o nounset
E           (( undef1++ ))
E           (( ++undef2 ))
E           echo "[$undef1][$undef2]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[arith.test.sh::No floating point[L334]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090833b0>
test_file = 'arith.test.sh'
test_case = TestCase(name='No floating point', script='echo $((1 + 2.3))', assertions=[Assertion(type='status', value=2, shells=No... 'mksh'], variant='OK'), Assertion(type='status', value=0, shells=['zsh'], variant='BUG')], line_number=334, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: No floating point (line 334)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: None
E           Actual stdout:   '1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $((1 + 2.3))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Dynamic base constants[L370]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109083770>
test_file = 'arith.test.sh'
test_case = TestCase(name='Dynamic base constants', script='base=16\necho $(( ${base}#a ))', assertions=[Assertion(type='stdout', ...dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=370, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Dynamic base constants (line 370)
E           
E           stdout mismatch:
E             expected: '10'
E             actual:   '0'
E           
E           Expected stdout: '10'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           base=16
E           echo $(( ${base}#a ))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Dynamic octal constant[L382]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090838f0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Dynamic octal constant', script='zero=0\necho $(( ${zero}11 ))', assertions=[Assertion(type='stdout', v...ariant=None), Assertion(type='stdout', value='11', shells=['mksh', 'zsh'], variant='N-I')], line_number=382, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Dynamic octal constant (line 382)
E           
E           stdout mismatch:
E             expected: '9'
E             actual:   '0'
E           
E           Expected stdout: '9'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           zero=0
E           echo $(( ${zero}11 ))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Dynamic hex constants[L388]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090839b0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Dynamic hex constants', script='zero=0\necho $(( ${zero}xAB ))', assertions=[Assertion(type='stdout', value='171', shells=None, variant=None)], line_number=388, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Dynamic hex constants (line 388)
E           
E           stdout mismatch:
E             expected: '171'
E             actual:   '0'
E           
E           Expected stdout: '171'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           zero=0
E           echo $(( ${zero}xAB ))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Dynamic var names - result of runtime parse/eval[L397]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109083b30>
test_file = 'arith.test.sh'
test_case = TestCase(name='Dynamic var names - result of runtime parse/eval', script='foo=5\nx=oo\necho $(( foo + f$x + 1 ))', assertions=[Assertion(type='stdout', value='11', shells=None, variant=None)], line_number=397, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Dynamic var names - result of runtime parse/eval (line 397)
E           
E           stdout mismatch:
E             expected: '11'
E             actual:   '6'
E           
E           Expected stdout: '11'
E           Actual stdout:   '6\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=5
E           x=oo
E           echo $(( foo + f$x + 1 ))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Recursive name evaluation is a result of runtime parse/eval[L403]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109083bf0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Recursive name evaluation is a result of runtime parse/eval', script='foo=5\nbar=foo\nspam=bar\neggs=sp...dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=403, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Recursive name evaluation is a result of runtime parse/eval (line 403)
E           
E           stdout mismatch:
E             expected: '6 6 6 6'
E             actual:   '6 1 1 1'
E           
E           Expected stdout: '6 6 6 6'
E           Actual stdout:   '6 1 1 1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=5
E           bar=foo
E           spam=bar
E           eggs=spam
E           echo $((foo+1)) $((bar+1)) $((spam+1)) $((eggs+1))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::nounset with arithmetic[L413]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109083cb0>
test_file = 'arith.test.sh'
test_case = TestCase(name='nounset with arithmetic', script='set -o nounset\nx=$(( y + 5 ))\necho "should not get here: x=${x:-<un...='BUG'), Assertion(type='status', value=0, shells=['dash', 'mksh', 'zsh'], variant='BUG')], line_number=413, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: nounset with arithmetic (line 413)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'should not get here: x=5'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'should not get here: x=5\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o nounset
E           x=$(( y + 5 ))
E           echo "should not get here: x=${x:-<unset>}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Invalid LValue: two sets of brackets[L536]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909c0b0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Invalid LValue: two sets of brackets', script='(( a[1][2] = 3 ))\necho "status=$?"\n#   shells treat th...dash'], variant='N-I'), Assertion(type='status', value=0, shells=['dash'], variant='N-I')], line_number=536, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Invalid LValue: two sets of brackets (line 536)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           (( a[1][2] = 3 ))
E           echo "status=$?"
E           #   shells treat this as a NON-fatal error
E           #   dash doesn't implement assignment
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Comment not allowed in the middle of multiline arithmetic[L587]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909c470>
test_file = 'arith.test.sh'
test_case = TestCase(name='Comment not allowed in the middle of multiline arithmetic', script='echo $((\n1 +\n2 + \\\n3\n))\necho ...['bash'], variant='OK'), Assertion(type='status', value=0, shells=['bash'], variant='OK')], line_number=587, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Comment not allowed in the middle of multiline arithmetic (line 587)
E           
E           stdout mismatch:
E             expected: '6\n[]'
E             actual:   '3\n1\n[3]'
E           
E           Expected stdout: '6\n[]'
E           Actual stdout:   '3\n1\n[3]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $((
E           1 +
E           2 + \
E           3
E           ))
E           echo $((
E           1 + 2  # not a comment
E           ))
E           (( a = 3 + 4  # comment
E           ))
E           echo [$a]
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[arith.test.sh::Double subscript[L631]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909c6b0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Double subscript', script='a=(1 2 3)\necho $(( a[1] ))\necho $(( a[1][1] ))', assertions=[Assertion(typ...dash'], variant='N-I'), Assertion(type='stdout', value='1', shells=['zsh'], variant='OK')], line_number=631, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Double subscript (line 631)
E           
E           stdout mismatch:
E             expected: '2'
E             actual:   '2\n1'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '2'
E           Actual stdout:   '2\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           echo $(( a[1] ))
E           echo $(( a[1][1] ))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::assignment with dynamic var name[L703]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909c9b0>
test_file = 'arith.test.sh'
test_case = TestCase(name='assignment with dynamic var name', script='foo=bar\necho $(( x$foo = 42 ))\necho xbar=$xbar', assertions=[Assertion(type='stdout', value='42\nxbar=42', shells=None, variant=None)], line_number=703, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assignment with dynamic var name (line 703)
E           
E           stdout mismatch:
E             expected: '42\nxbar=42'
E             actual:   '0\nxbar='
E           
E           Expected stdout: '42\nxbar=42'
E           Actual stdout:   '0\nxbar=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=bar
E           echo $(( x$foo = 42 ))
E           echo xbar=$xbar
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::array assignment with dynamic array name[L712]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909ca70>
test_file = 'arith.test.sh'
test_case = TestCase(name='array assignment with dynamic array name', script='foo=bar\necho $(( x$foo[5] = 42 ))\necho "xbar[5]="$..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=712, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array assignment with dynamic array name (line 712)
E           
E           stdout mismatch:
E             expected: '42\nxbar[5]=42'
E             actual:   '0\nxbar[5]='
E           
E           Expected stdout: '42\nxbar[5]=42'
E           Actual stdout:   '0\nxbar[5]=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=bar
E           echo $(( x$foo[5] = 42 ))
E           echo "xbar[5]="${xbar[5]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::unary assignment with dynamic var name[L727]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909cb30>
test_file = 'arith.test.sh'
test_case = TestCase(name='unary assignment with dynamic var name', script='foo=bar\nxbar=42\necho $(( x$foo++ ))\necho xbar=$xbar..., variant='BUG'), Assertion(type='stdout-json', value='', shells=['dash'], variant='BUG')], line_number=727, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unary assignment with dynamic var name (line 727)
E           
E           stdout mismatch:
E             expected: '42\nxbar=43'
E             actual:   '0\nxbar=42'
E           
E           Expected stdout: '42\nxbar=43'
E           Actual stdout:   '0\nxbar=42\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=bar
E           xbar=42
E           echo $(( x$foo++ ))
E           echo xbar=$xbar
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::unary array assignment with dynamic var name[L739]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909cbf0>
test_file = 'arith.test.sh'
test_case = TestCase(name='unary array assignment with dynamic var name', script='foo=bar\nxbar[5]=42\necho $(( x$foo[5]++ ))\nech..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=739, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unary array assignment with dynamic var name (line 739)
E           
E           stdout mismatch:
E             expected: '42\nxbar[5]=43'
E             actual:   '0\nxbar[5]=42'
E           
E           Expected stdout: '42\nxbar[5]=43'
E           Actual stdout:   '0\nxbar[5]=42\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=bar
E           xbar[5]=42
E           echo $(( x$foo[5]++ ))
E           echo "xbar[5]="${xbar[5]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Dynamic parsing of arithmetic[L755]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909ccb0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Dynamic parsing of arithmetic', script='e=1+2\necho $(( e + 3 ))\n[[ e -eq 3 ]] && echo true\n[ e -eq 3..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=755, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Dynamic parsing of arithmetic (line 755)
E           
E           stdout mismatch:
E             expected: '6\ntrue\nstatus=2'
E             actual:   '3\nstatus=2'
E           
E           Expected stdout: '6\ntrue\nstatus=2'
E           Actual stdout:   '3\nstatus=2\n'
E           Expected stderr: None
E           Actual stderr:   'bash: [: integer expression expected\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           e=1+2
E           echo $(( e + 3 ))
E           [[ e -eq 3 ]] && echo true
E           [ e -eq 3 ]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::1 ? a=1 : b=2 ( bug fix)[L791]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909cef0>
test_file = 'arith.test.sh'
test_case = TestCase(name='1 ? a=1 : b=2 ( bug fix)', script='echo $((1 ? a=1 : 42 ))\necho a=$a\n\n# this does NOT work\n#echo $(...['zsh'], variant='BUG'), Assertion(type='status', value=1, shells=['zsh'], variant='BUG')], line_number=791, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 1 ? a=1 : b=2 ( bug fix) (line 791)
E           
E           stdout mismatch:
E             expected: '1\na=1'
E             actual:   '0\na='
E           
E           Expected stdout: '1\na=1'
E           Actual stdout:   '0\na=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $((1 ? a=1 : 42 ))
E           echo a=$a
E           
E           # this does NOT work
E           #echo $((1 ? a=1 : b=2 ))
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[arith.test.sh::Invalid constant[L805]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909cfb0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Invalid constant', script="echo $((a + x42))\necho status=$?\n\n# weird asymmetry -- the above is a syn...n(type='stdout', value='0\nstatus=0\nstatus=1\nstatus=1', shells=['bash'], variant='BUG')], line_number=805, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Invalid constant (line 805)
E           
E           stdout mismatch:
E             expected: '0\nstatus=0\nstatus=1\nstatus=1'
E             actual:   '0\nstatus=0\n0\nstatus=0\n0\nstatus=0'
E           
E           Expected stdout: '0\nstatus=0\nstatus=1\nstatus=1'
E           Actual stdout:   '0\nstatus=0\n0\nstatus=0\n0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $((a + x42))
E           echo status=$?
E           
E           # weird asymmetry -- the above is a syntax error, but this isn't
E           $SH -c 'echo $((a + 42x))'
E           echo status=$?
E           
E           # regression
E           echo $((a + 42x))
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::Negative numbers with bit shift[L888]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d1f0>
test_file = 'arith.test.sh'
test_case = TestCase(name='Negative numbers with bit shift', script="echo $(( 5 << 1 ))\necho $(( 5 << 0 ))\n$SH -c 'echo $(( 5 <<...dout', value='10\n5\n-2147483648\n---\n8\n16\n0\n0\n---', shells=['mksh'], variant='BUG')], line_number=888, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Negative numbers with bit shift (line 888)
E           
E           stdout mismatch:
E             expected: '10\n5\n-9223372036854775808\n---\n8\n16\n0\n0\n---'
E             actual:   '10\n5'
E           
E           Expected stdout: '10\n5\n-9223372036854775808\n---\n8\n16\n0\n0\n---'
E           Actual stdout:   '10\n5\n'
E           Expected stderr: None
E           Actual stderr:   'bash: negative shift count\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           echo $(( 5 << 1 ))
E           echo $(( 5 << 0 ))
E           $SH -c 'echo $(( 5 << -1 ))'  # implementation defined - OSH fails
E           echo ---
E           
E           echo $(( 16 >> 1 ))
E           echo $(( 16 >> 0 ))
E           $SH -c 'echo $(( 16 >> -1 ))'  # not sure why this is zero
E           $SH -c 'echo $(( 16 >> -2 ))'  # also 0
E           echo ---
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::undef[0] with nounset[L961]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d370>
test_file = 'arith.test.sh'
test_case = TestCase(name='undef[0] with nounset', script='case $SH in dash) exit ;; esac\n\nset -o nounset\necho UNSET $(( undef[...zsh'], variant='BUG'), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=961, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: undef[0] with nounset (line 961)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'UNSET 0\nstatus=0'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'UNSET 0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           set -o nounset
E           echo UNSET $(( undef[0] ))
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::s[0] with string 42[L1005]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d4f0>
test_file = 'arith.test.sh'
test_case = TestCase(name='s[0] with string 42', script="case $SH in dash) exit ;; esac\n\ns='42'\necho 42 $(( s[0] )) $(( s[1] ))...N-I'), Assertion(type='stdout', value='42 0 4\nstatus=0', shells=['zsh'], variant='BUG')], line_number=1005, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: s[0] with string 42 (line 1005)
E           
E           stdout mismatch:
E             expected: '42 42 0\nstatus=0'
E             actual:   '42 0 0\nstatus=0'
E           
E           Expected stdout: '42 42 0\nstatus=0'
E           Actual stdout:   '42 0 0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           s='42'
E           echo 42 $(( s[0] )) $(( s[1] ))
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[arith.test.sh::s[0] with string '12 34'[L1024]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d5b0>
test_file = 'arith.test.sh'
test_case = TestCase(name="s[0] with string '12 34'", script="s='12 34'\necho '12 34' $(( s[0] )) $(( s[1] ))\necho status=$?\n\n\...riant='BUG'), Assertion(type='stdout', value='status=1', shells=['bash'], variant='BUG')], line_number=1024, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: s[0] with string '12 34' (line 1024)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   '12 34 0 0\nstatus=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   '12 34 0 0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           s='12 34'
E           echo '12 34' $(( s[0] )) $(( s[1] ))
E           echo status=$?
E           
E           
E           
E           
E           # bash prints an error, but doesn't fail
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::Indexed LHS without spaces, and +=[L6]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d670>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='Indexed LHS without spaces, and +=', script='a[1]=x\necho status=$?\nargv.py "${a[@]}"\n\na[0+2]=y\n#a[...variant='N-I'), Assertion(type='stdout', value='status=127', shells=['ash'], variant='N-I')], line_number=6, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Indexed LHS without spaces, and += (line 6)
E           
E           stdout mismatch:
E             expected: "status=0\n['x']\n['x', 'y']\n['x', 'yz']"
E             actual:   "status=0\n['x']\n['x']\n['x']"
E           
E           Expected stdout: "status=0\n['x']\n['x', 'y']\n['x', 'yz']"
E           Actual stdout:   "status=0\n['x']\n['x']\n['x']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a[1]=x
E           echo status=$?
E           argv.py "${a[@]}"
E           
E           a[0+2]=y
E           #a[2|3]=y  # zsh doesn't allow this
E           argv.py "${a[@]}"
E           
E           # += does appending
E           a[0+2]+=z
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::Indexed LHS with spaces[L31]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d730>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='Indexed LHS with spaces', script='case $SH in zsh|ash) exit ;; esac\n\na[1 * 1]=x\na[ 1 + 2 ]=z\necho s...e, variant=None), Assertion(type='stdout', value='', shells=['zsh', 'ash'], variant='N-I')], line_number=31, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Indexed LHS with spaces (line 31)
E           
E           stdout mismatch:
E             expected: "status=0\n['x', 'z']"
E             actual:   'status=1\n[]'
E           
E           Expected stdout: "status=0\n['x', 'z']"
E           Actual stdout:   'status=1\n[]\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[1: command not found\nbash: a[: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh|ash) exit ;; esac
E           
E           a[1 * 1]=x
E           a[ 1 + 2 ]=z
E           echo status=$?
E           
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::Nested a[i[0]]=0[L46]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d7f0>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='Nested a[i[0]]=0', script='i=(0 1 2)\n\na[i[0]]=0\na[ i[1] ]=1\na[ i[2] ]=2\na[ i[1]+i[2] ]=3\n\nargv.p...e, variant=None), Assertion(type='stdout', value='', shells=['zsh', 'ash'], variant='N-I')], line_number=46, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Nested a[i[0]]=0 (line 46)
E           
E           stdout mismatch:
E             expected: "['0', '1', '2', '3']"
E             actual:   '[]'
E           
E           Expected stdout: "['0', '1', '2', '3']"
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[: command not found\nbash: a[: command not found\nbash: a[: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=(0 1 2)
E           
E           a[i[0]]=0
E           a[ i[1] ]=1
E           a[ i[2] ]=2
E           a[ i[1]+i[2] ]=3
E           
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::Multiple LHS array words[L63]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d8b0>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='Multiple LHS array words', script="case $SH in zsh|ash) exit ;; esac\n\na=(0 1 2)\nb=(3 4 5)\n\n#declar...], variant='OK'), Assertion(type='stdout', value='', shells=['zsh', 'ash'], variant='N-I')], line_number=63, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple LHS array words (line 63)
E           
E           stdout mismatch:
E             expected: 'declare -a a=([0]="0" [1]="" [2]="2")\ndeclare -a b=([0]="3" [1]="4" [2]="/home/spec-test/src")\n---\n[\'b[2\', \'+\', \'0]=bar\']\nstatus=0\ndeclare -a a=([0]="0" [1]="" [2]="2")\ndeclare -a b=([0]="3" [1]="4" [2]="/home/spec-test/src")'
E             actual:   'declare -a a=([0]="0" [1]="1" [2]="2")\ndeclare -a b=([0]="3" [1]="4" [2]="5")\n---\nstatus=1\ndeclare -a a=([0]="0" [1]="1" [2]="2")\ndeclare -a b=([0]="3" [1]="4" [2]="5")'
E           
E           Expected stdout: 'declare -a a=([0]="0" [1]="" [2]="2")\ndeclare -a b=([0]="3" [1]="4" [2]="/home/spec-test/src")\n---\n[\'b[2\', \'+\', \'0]=bar\']\nstatus=0\ndeclare -a a=([0]="0" [1]="" [2]="2")\ndeclare -a b=([0]="3" [1]="4" [2]="/home/spec-test/src")'
E           Actual stdout:   'declare -a a=([0]="0" [1]="1" [2]="2")\ndeclare -a b=([0]="3" [1]="4" [2]="5")\n---\nstatus=1\ndeclare -a a=([0]="0" [1]="1" [2]="2")\ndeclare -a b=([0]="3" [1]="4" [2]="5")\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[0: command not found\nbash: a[0: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh|ash) exit ;; esac
E           
E           a=(0 1 2)
E           b=(3 4 5)
E           
E           #declare -p a b
E           
E           HOME=/home/spec-test
E           
E           # empty string, and tilde sub
E           a[0 + 1]=  b[2 + 0]=~/src
E           
E           typeset -p a b
E           
E           echo ---
E           
E           # In bash, this bad prefix binding prints an error, but nothing fails
E           a[0 + 1]='foo' argv.py b[2 + 0]='bar'
E           echo status=$?
E           
E           typeset -p a b
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::LHS array is protected with shopt -s eval_unsafe_arith, e.g. 'a[$(echo 2)]'[L121]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909d970>
test_file = 'array-assign.test.sh'
test_case = TestCase(name="LHS array is protected with shopt -s eval_unsafe_arith, e.g. 'a[$(echo 2)]'", script='case $SH in zsh|a..., variant='OK'), Assertion(type='stdout', value='', shells=['zsh', 'ash'], variant='N-I')], line_number=121, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: LHS array is protected with shopt -s eval_unsafe_arith, e.g. 'a[$(echo 2)]' (line 121)
E           
E           stdout mismatch:
E             expected: 'declare -a b=([0]="3" [1]="4" [2]="5")\nget 5\nset zzz\ndeclare -a b=([0]="3" [1]="4" [2]="zzz")'
E             actual:   'declare -a b=([0]="3" [1]="4" [2]="5")\nget 3\nset 3\ndeclare -a b=([0]="3" [1]="4" [2]="5" [expr]="zzz")'
E           
E           Expected stdout: 'declare -a b=([0]="3" [1]="4" [2]="5")\nget 5\nset zzz\ndeclare -a b=([0]="3" [1]="4" [2]="zzz")'
E           Actual stdout:   'declare -a b=([0]="3" [1]="4" [2]="5")\nget 3\nset 3\ndeclare -a b=([0]="3" [1]="4" [2]="5" [expr]="zzz")\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh|ash) exit ;; esac
E           
E           a=(0 1 2)
E           b=(3 4 5)
E           typeset -p b
E           
E           expr='a[$(echo 2)]' 
E           
E           echo 'get' "${b[expr]}"
E           
E           b[expr]=zzz
E           
E           echo 'set' "${b[expr]}"
E           typeset -p b
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::file named a[ is  not executed[L161]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909da30>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='file named a[ is  not executed', script='case $SH in zsh|ash) exit ;; esac\n\nPATH=".:$PATH"\n\nfor nam... variant='N-I'), Assertion(type='stdout', value='', shells=['zsh', 'ash'], variant='N-I')], line_number=161, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: file named a[ is  not executed (line 161)
E           
E           stdout mismatch:
E             expected: 'len=2'
E             actual:   'len=0'
E           
E           Expected stdout: 'len=2'
E           Actual stdout:   'len=0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[5: command not found\nbash: a[5: command not found\nbash: a[5: command not found\n'
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in zsh|ash) exit ;; esac
E           
E           PATH=".:$PATH"
E           
E           for name in 'a[' 'a[5'; do
E             echo "echo hi from $name: \$# args: \$@" > "$name"
E             chmod +x "$name"
E           done
E           
E           # this does not executed a[5
E           a[5 + 1]=
E           a[5 / 1]=y
E           echo len=${#a[@]}
E           
E           # Not detected as assignment because there's a non-arith character
E           # bash and mksh both give a syntax error
E           a[5 # 1]=
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::More fragments like a[  a[5  a[5 +  a[5 + 3][L189]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909daf0>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='More fragments like a[  a[5  a[5 +  a[5 + 3]', script='for name in \'a[\' \'a[5\'; do\n  echo "echo hi ... status=127\na[5 + 3]+ status=127\na[5 + 3]+= status=127', shells=['ash'], variant='N-I')], line_number=189, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: More fragments like a[  a[5  a[5 +  a[5 + 3] (line 189)
E           
E           stdout mismatch:
E             expected: 'a[ status=2\na[5 status=2\na[5 + status=2\na[5 + 3] status=127\na[5 + 3]= status=0\na[5 + 3]+ status=127\na[5 + 3]+= status=0'
E             actual:   'a[ status=1\na[5 status=1\na[5 + status=1\na[5 + 3] status=1\na[5 + 3]= status=1\na[5 + 3]+ status=1\na[5 + 3]+= status=1'
E           
E           Expected stdout: 'a[ status=2\na[5 status=2\na[5 + status=2\na[5 + 3] status=127\na[5 + 3]= status=0\na[5 + 3]+ status=127\na[5 + 3]+= status=0'
E           Actual stdout:   'a[ status=1\na[5 status=1\na[5 + status=1\na[5 + 3] status=1\na[5 + 3]= status=1\na[5 + 3]+ status=1\na[5 + 3]+= status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[: command not found\nbash: a[5: command not found\nbash: a[5: command not found\nbash: a[5: command not found\nbash: a[5: command not found\nbash: a[5: command not found\nbash: a[5: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           for name in 'a[' 'a[5'; do
E             echo "echo hi from $name: \$# args: \$@" > "$name"
E             chmod +x "$name"
E           done
E           
E           # syntax error in bash
E           $SH -c 'a['
E           echo "a[ status=$?"
E           
E           $SH -c 'a[5'
E           echo "a[5 status=$?"
E           
E           # 1 arg +
E           $SH -c 'a[5 +'
E           echo "a[5 + status=$?"
E           
E           # 2 args
E           $SH -c 'a[5 + 3]'
E           echo "a[5 + 3] status=$?"
E           
E           $SH -c 'a[5 + 3]='
E           echo "a[5 + 3]= status=$?"
E           
E           $SH -c 'a[5 + 3]+'
E           echo "a[5 + 3]+ status=$?"
E           
E           $SH -c 'a[5 + 3]+='
E           echo "a[5 + 3]+= status=$?"
E           
E           # mksh doesn't issue extra parse errors
E           # and it doesn't turn a[5 + 3] and a[5 + 3]+ into commands!
E           
E           
E           
E           # in zsh, everything becomes "bad pattern"
E           
E           
E           # ash behavior is consistent
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::Are quotes allowed?[L267]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909dbb0>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='Are quotes allowed?', script='# double quotes allowed in bash\na["1"]=2\necho status=$? len=${#a[@]}\n\...=0 len=1\nstatus=0 len=2\nstatus=0 len=3\nstatus=0 len=4', shells=['bash'], variant='OK')], line_number=267, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Are quotes allowed? (line 267)
E           
E           stdout mismatch:
E             expected: 'status=0 len=1\nstatus=1 len=1\nstatus=0 len=2\nstatus=1 len=2'
E             actual:   'status=0 len=0\nstatus=0 len=0\nstatus=1 len=0\nstatus=1 len=0'
E           
E           Expected stdout: 'status=0 len=1\nstatus=1 len=1\nstatus=0 len=2\nstatus=1 len=2'
E           Actual stdout:   'status=0 len=0\nstatus=0 len=0\nstatus=1 len=0\nstatus=1 len=0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[2: command not found\nbash: a[3: command not found\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # double quotes allowed in bash
E           a["1"]=2
E           echo status=$? len=${#a[@]}
E           
E           a['2']=3
E           echo status=$? len=${#a[@]}
E           
E           # allowed in bash
E           a[2 + "3"]=5
E           echo status=$? len=${#a[@]}
E           
E           a[3 + '4']=5
E           echo status=$? len=${#a[@]}
E           
E           
E           
E           # syntax errors are not fatal in bash
E           
E           
E           # bash 3.2+ treats single quotes in array index as character values
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::Tricky parsing - a[ a[0]=1 ]=X  a[ a[0]+=1 ]+=X[L308]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909dc70>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='Tricky parsing - a[ a[0]=1 ]=X  a[ a[0]+=1 ]+=X', script="# the nested [] means we can't use regular la...t=None), Assertion(type='stdout', value='', shells=['zsh', 'mksh', 'ash'], variant='N-I')], line_number=308, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Tricky parsing - a[ a[0]=1 ]=X  a[ a[0]+=1 ]+=X (line 308)
E           
E           stdout mismatch:
E             expected: 'assign=42\ndeclare -a a=([0]="1" [1]="X")\ndeclare -a a=([0]="1" [1]="X" [2]="3" [3]="Y")\n---\ndeclare -a a=([0]="2" [1]="X" [2]="3X" [3]="Y")'
E             actual:   'assign=42\n---'
E           
E           Expected stdout: 'assign=42\ndeclare -a a=([0]="1" [1]="X")\ndeclare -a a=([0]="1" [1]="X" [2]="3" [3]="Y")\n---\ndeclare -a a=([0]="2" [1]="X" [2]="3X" [3]="Y")'
E           Actual stdout:   'assign=42\n---\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[a[0]=1]=X: command not found\nbash: declare: a: not found\nbash: a[: command not found\nbash: declare: a: not found\nbash: a[: command not found\nbash: declare: a: not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # the nested [] means we can't use regular language lookahead?
E           
E           echo assign=$(( z[0] = 42 ))
E           
E           a[a[0]=1]=X
E           declare -p a
E           
E           a[ a[2]=3 ]=Y
E           declare -p a
E           
E           echo ---
E           
E           a[ a[0]+=1 ]+=X
E           declare -p a
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::argv.py a[1 + 2]=[L335]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909dd30>
test_file = 'array-assign.test.sh'
test_case = TestCase(name='argv.py a[1 + 2]=', script="case $SH in zsh|ash) exit ;; esac\n\n# This tests that the worse parser doe..., variant=None), Assertion(type='stdout', value='', shells=['zsh', 'ash'], variant='N-I')], line_number=335, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: argv.py a[1 + 2]= (line 335)
E           
E           stdout mismatch:
E             expected: "['a[1', '+', '2]=']\nstatus=0\n['a[1', '+', '2]+=']\nstatus=0\n['a[3', '+', '4]=']\n['a[3', '+', '4]+=']"
E             actual:   "status=1\nstatus=1\n['a[3', '+', '4]=']\n['a[3', '+', '4]+=']"
E           
E           Expected stdout: "['a[1', '+', '2]=']\nstatus=0\n['a[1', '+', '2]+=']\nstatus=0\n['a[3', '+', '4]=']\n['a[3', '+', '4]+=']"
E           Actual stdout:   "status=1\nstatus=1\n['a[3', '+', '4]=']\n['a[3', '+', '4]+=']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: a[1: command not found\nbash: a[1: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh|ash) exit ;; esac
E           
E           # This tests that the worse parser doesn't unconditinoally treat a[ as special
E           
E           a[1 + 2]= argv.py a[1 + 2]=
E           echo status=$?
E           
E           a[1 + 2]+= argv.py a[1 + 2]+=
E           echo status=$?
E           
E           argv.py a[3 + 4]=
E           
E           argv.py a[3 + 4]+=
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assign.test.sh::declare builtin doesn't allow spaces[L362]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909ddf0>
test_file = 'array-assign.test.sh'
test_case = TestCase(name="declare builtin doesn't allow spaces", script="case $SH in zsh|mksh|ash) exit ;; esac\n\n# OSH doesn't ...t='OK'), Assertion(type='stdout', value='', shells=['zsh', 'mksh', 'ash'], variant='N-I')], line_number=362, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare builtin doesn't allow spaces (line 362)
E           
E           stdout mismatch:
E             expected: 'declare -a a=([0]="1" [1]="X")\ndeclare -a a=([0]="1" [1]="X" [2]="3")'
E             actual:   ''
E           
E           Expected stdout: 'declare -a a=([0]="1" [1]="X")\ndeclare -a a=([0]="1" [1]="X" [2]="3")'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   "bash: declare: a: not found\nbash: declare: `a[': not a valid identifier\nbash: declare: `]=Y': not a valid identifier\nbash: declare: a: not found\n"
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in zsh|mksh|ash) exit ;; esac
E           
E           # OSH doesn't allow this
E           declare a[a[0]=1]=X
E           declare -p a
E           
E           # neither bash nor OSH allow this
E           declare a[ a[2]=3 ]=Y
E           declare -p a
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::Literal syntax ([x]=y)[L19]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909deb0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='Literal syntax ([x]=y)', script='declare -A a\na=([aa]=b [foo]=bar [\'a+1\']=c)\necho ${a["aa"]}\necho ...+1"]}', assertions=[Assertion(type='stdout', value='b\nbar\nc', shells=None, variant=None)], line_number=19, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Literal syntax ([x]=y) (line 19)
E           
E           stdout mismatch:
E             expected: 'b\nbar\nc'
E             actual:   ''
E           
E           Expected stdout: 'b\nbar\nc'
E           Actual stdout:   '\n\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a=([aa]=b [foo]=bar ['a+1']=c)
E           echo ${a["aa"]}
E           echo ${a["foo"]}
E           echo ${a["a+1"]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::Can initialize assoc array with the "(key value ...)" sequence[L50]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909e030>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='Can initialize assoc array with the "(key value ...)" sequence', script='declare -A A=(1 2 3)\necho sta...ssertion(type='stdout', value='status=0\ndeclare -A A=()', shells=['bash'], variant='BUG')], line_number=50, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Can initialize assoc array with the "(key value ...)" sequence (line 50)
E           
E           stdout mismatch:
E             expected: 'status=0\ndeclare -A A=()'
E             actual:   'status=0\ndeclare -A A=([0]="1" [1]="2" [2]="3")'
E           
E           Expected stdout: 'status=0\ndeclare -A A=()'
E           Actual stdout:   'status=0\ndeclare -A A=([0]="1" [1]="2" [2]="3")\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A=(1 2 3)
E           echo status=$?
E           declare -p A
E           
E           # bash-4.4 prints warnings to stderr but gives no indication of the problem
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::retrieve keys with ![L84]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909e270>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='retrieve keys with !', script='declare -A a\nvar=\'x\'\na["$var"]=b\na[\'foo\']=bar\na[\'a+1\']=c\nfor ...ort', assertions=[Assertion(type='stdout', value='a+1\nfoo\nx', shells=None, variant=None)], line_number=84, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: retrieve keys with ! (line 84)
E           
E           stdout mismatch:
E             expected: 'a+1\nfoo\nx'
E             actual:   '"$var"\n\'a+1\'\n\'foo\''
E           
E           Expected stdout: 'a+1\nfoo\nx'
E           Actual stdout:   '"$var"\n\'a+1\'\n\'foo\'\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           var='x'
E           a["$var"]=b
E           a['foo']=bar
E           a['a+1']=c
E           for key in "${!a[@]}"; do
E             echo $key
E           done | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::coerce to string with ${A[*]}, etc.[L114]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909e3f0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='coerce to string with ${A[*]}, etc.', script='declare -A A\nA[\'X X\']=xx\nA[\'Y Y\']=yy\nargv.py "${A[...="['xx yy']\n['X X Y Y']\n['xx', 'yy']\n['X', 'X', 'Y', 'Y']", shells=None, variant=None)], line_number=114, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: coerce to string with ${A[*]}, etc. (line 114)
E           
E           stdout mismatch:
E             expected: "['xx yy']\n['X X Y Y']\n['xx', 'yy']\n['X', 'X', 'Y', 'Y']"
E             actual:   '[\'xx yy\']\n["\'X X\' \'Y Y\'"]\n[\'xx\', \'yy\']\n["\'X", "X\'", "\'Y", "Y\'"]'
E           
E           Expected stdout: "['xx yy']\n['X X Y Y']\n['xx', 'yy']\n['X', 'X', 'Y', 'Y']"
E           Actual stdout:   '[\'xx yy\']\n["\'X X\' \'Y Y\'"]\n[\'xx\', \'yy\']\n["\'X", "X\'", "\'Y", "Y\'"]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A
E           A['X X']=xx
E           A['Y Y']=yy
E           argv.py "${A[*]}"
E           argv.py "${!A[*]}"
E           
E           argv.py ${A[@]}
E           argv.py ${!A[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[array-assoc.test.sh::${A[@]/b/B}[L130]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909e4b0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='${A[@]/b/B}', script='# but ${!A[@]/b/B} doesn\'t work\ndeclare -A A\nA[\'aa\']=bbb\nA[\'bb\']=ccc\nA[\...', assertions=[Assertion(type='stdout', value='BBB\nccc\nddd', shells=None, variant=None)], line_number=130, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${A[@]/b/B} (line 130)
E           
E           stdout mismatch:
E             expected: 'BBB\nccc\nddd'
E             actual:   'BBB ccc ddd'
E           
E           Expected stdout: 'BBB\nccc\nddd'
E           Actual stdout:   'BBB ccc ddd\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # but ${!A[@]/b/B} doesn't work
E           declare -A A
E           A['aa']=bbb
E           A['bb']=ccc
E           A['cc']=ddd
E           for val in "${A[@]//b/B}"; do
E             echo $val
E           done | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::${A[@]#prefix}[L145]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909e570>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='${A[@]#prefix}', script='declare -A A\nA[\'aa\']=one\nA[\'bb\']=two\nA[\'cc\']=three\nfor val in "${A[@...', assertions=[Assertion(type='stdout', value='hree\none\nwo', shells=None, variant=None)], line_number=145, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${A[@]#prefix} (line 145)
E           
E           stdout mismatch:
E             expected: 'hree\none\nwo'
E             actual:   'one wo hree'
E           
E           Expected stdout: 'hree\none\nwo'
E           Actual stdout:   'one wo hree\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A
E           A['aa']=one
E           A['bb']=two
E           A['cc']=three
E           for val in "${A[@]#t}"; do
E             echo $val
E           done | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::lookup with ${a[0]} -- "0" is a string[L185]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909e7b0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='lookup with ${a[0]} -- "0" is a string', script='declare -A a\na["0"]=a\na["1"]=b\na["2"]=c\necho 0 "${...}"', assertions=[Assertion(type='stdout', value='0 a 1 b 2 c', shells=None, variant=None)], line_number=185, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: lookup with ${a[0]} -- "0" is a string (line 185)
E           
E           stdout mismatch:
E             expected: '0 a 1 b 2 c'
E             actual:   '0  1  2'
E           
E           Expected stdout: '0 a 1 b 2 c'
E           Actual stdout:   '0  1  2 \n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a["0"]=a
E           a["1"]=b
E           a["2"]=c
E           echo 0 "${a[0]}" 1 "${a[1]}" 2 "${a[2]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::lookup with double quoted strings "mykey"[L195]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909e870>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='lookup with double quoted strings "mykey"', script='declare -A a\na["aa"]=b\na["foo"]=bar\na[\'a+1\']=c...+1"]}"', assertions=[Assertion(type='stdout', value='b bar c', shells=None, variant=None)], line_number=195, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: lookup with double quoted strings "mykey" (line 195)
E           
E           stdout mismatch:
E             expected: 'b bar c'
E             actual:   'b bar'
E           
E           Expected stdout: 'b bar c'
E           Actual stdout:   'b bar \n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a["aa"]=b
E           a["foo"]=bar
E           a['a+1']=c
E           echo "${a["aa"]}" "${a["foo"]}" "${a["a+1"]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::lookup with unquoted $key and quoted "$i$i"[L213]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909e9f0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='lookup with unquoted $key and quoted "$i$i"', script='declare -A A\nA["aa"]=b\nA["foo"]=bar\n\nkey=foo\... in OSH', assertions=[Assertion(type='stdout', value='bar\nb', shells=None, variant=None)], line_number=213, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: lookup with unquoted $key and quoted "$i$i" (line 213)
E           
E           stdout mismatch:
E             expected: 'bar\nb'
E             actual:   ''
E           
E           Expected stdout: 'bar\nb'
E           Actual stdout:   '\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A
E           A["aa"]=b
E           A["foo"]=bar
E           
E           key=foo
E           echo ${A[$key]}
E           i=a
E           echo ${A["$i$i"]}   # note: ${A[$i$i]} doesn't work in OSH
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::lookup by unquoted string doesn't work in OSH because it's a variable[L227]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909eab0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name="lookup by unquoted string doesn't work in OSH because it's a variable", script='declare -A a\na["aa"]=b...bash'], variant='BUG'), Assertion(type='status', value=0, shells=['bash'], variant='BUG')], line_number=227, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: lookup by unquoted string doesn't work in OSH because it's a variable (line 227)
E           
E           stdout mismatch:
E             expected: 'c'
E             actual:   ''
E           
E           Expected stdout: 'c'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a["aa"]=b
E           a["foo"]=bar
E           a['a+1']=c
E           echo "${a[a+1]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::bash bug: "i+1" and i+1 are the same key[L238]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909eb70>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='bash bug: "i+1" and i+1 are the same key', script='i=1\narray=(5 6 7)\necho array[i]="${array[i]}"\nech...soc[i+1]=string+1\nassoc[i]=string\nassoc[i+1]=string+1', shells=['bash'], variant='BUG')], line_number=238, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bash bug: "i+1" and i+1 are the same key (line 238)
E           
E           stdout mismatch:
E             expected: 'array[i]=6\narray[i+1]=7\nassoc[i]=string\nassoc[i+1]=string+1\nassoc[i]=string\nassoc[i+1]=string+1'
E             actual:   'array[i]=6\narray[i+1]=7\nassoc[i]=1\nassoc[i+1]=1+1\nassoc[i]=i]}\nassoc[i+1]=i+1]}'
E           
E           Expected stdout: 'array[i]=6\narray[i+1]=7\nassoc[i]=string\nassoc[i+1]=string+1\nassoc[i]=string\nassoc[i+1]=string+1'
E           Actual stdout:   'array[i]=6\narray[i+1]=7\nassoc[i]=1\nassoc[i+1]=1+1\nassoc[i]=i]}\nassoc[i+1]=i+1]}\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           i=1
E           array=(5 6 7)
E           echo array[i]="${array[i]}"
E           echo array[i+1]="${array[i+1]}"
E           
E           # arithmetic does NOT work here in bash.  These are unquoted strings!
E           declare -A assoc
E           assoc[i]=$i
E           assoc[i+1]=$i+1
E           
E           assoc["i"]=string
E           assoc["i+1"]=string+1
E           
E           echo assoc[i]="${assoc[i]}" 
E           echo assoc[i+1]="${assoc[i+1]}"
E           
E           echo assoc[i]="${assoc["i"]}" 
E           echo assoc[i+1]="${assoc["i+1"]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::Indexed array as key of associative array coerces to string (without shopt -s strict_array)[L282]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909ecf0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='Indexed array as key of associative array coerces to string (without shopt -s strict_array)', script='d..., assertions=[Assertion(type='stdout', value='foo\n1 2 3\n42', shells=None, variant=None)], line_number=282, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Indexed array as key of associative array coerces to string (without shopt -s strict_array) (line 282)
E           
E           stdout mismatch:
E             expected: 'foo\n1 2 3\n42'
E             actual:   'foo\n"${array[@]}"\n42'
E           
E           Expected stdout: 'foo\n1 2 3\n42'
E           Actual stdout:   'foo\n"${array[@]}"\n42\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a array=(1 2 3)
E           declare -A assoc
E           assoc[42]=43
E           assoc["${array[@]}"]=foo
E           
E           echo "${assoc["${array[@]}"]}"
E           for entry in "${!assoc[@]}"; do
E             echo $entry
E           done | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::Append to associative array value A['x']+='suffix'[L299]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909edb0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name="Append to associative array value A['x']+='suffix'", script='declare -A A\nA[\'x\']=\'foo\'\nA[\'x\']+=...', assertions=[Assertion(type='stdout', value="['foobarbar']", shells=None, variant=None)], line_number=299, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Append to associative array value A['x']+='suffix' (line 299)
E           
E           stdout mismatch:
E             expected: "['foobarbar']"
E             actual:   "['']"
E           
E           Expected stdout: "['foobarbar']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A
E           A['x']='foo'
E           A['x']+='bar'
E           A['x']+='bar'
E           argv.py "${A["x"]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::Slice of associative array doesn't make sense in bash[L309]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909ee70>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name="Slice of associative array doesn't make sense in bash", script='declare -A a\na[xx]=1\na[yy]=2\na[zz]=3... shells=None, variant=None), Assertion(type='status', value=1, shells=None, variant=None)], line_number=309, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Slice of associative array doesn't make sense in bash (line 309)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   "['1', '2', '3']\n['2', '3', '4']\n['3', '4', '5']\n['4', '5']\n['5']\n[]"
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   "['1', '2', '3']\n['2', '3', '4']\n['3', '4', '5']\n['4', '5']\n['5']\n[]\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a[xx]=1
E           a[yy]=2
E           a[zz]=3
E           a[aa]=4
E           a[bb]=5
E           #argv.py ${a["xx"]}
E           argv.py ${a[@]: 0: 3}
E           argv.py ${a[@]: 1: 3}
E           argv.py ${a[@]: 2: 3}
E           argv.py ${a[@]: 3: 3}
E           argv.py ${a[@]: 4: 3}
E           argv.py ${a[@]: 5: 3}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::setting key to itself (from bash-bug mailing list)[L377]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f230>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='setting key to itself (from bash-bug mailing list)', script='declare -A foo\nfoo=(["key"]="value1")\nec...t=None), Assertion(type='stdout', value='value1\nvalue2', shells=['bash'], variant='BUG')], line_number=377, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: setting key to itself (from bash-bug mailing list) (line 377)
E           
E           stdout mismatch:
E             expected: 'value1\nvalue2'
E             actual:   ''
E           
E           Expected stdout: 'value1\nvalue2'
E           Actual stdout:   '\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A foo
E           foo=(["key"]="value1")
E           echo ${foo["key"]}
E           foo=(["key"]="${foo["key"]} value2")
E           echo ${foo["key"]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::readonly associative array can't be modified[L392]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f2f0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name="readonly associative array can't be modified", script="declare -Ar A\nA['x']=1\necho status=$?\n# just-...h'], variant='OK'), Assertion(type='stdout', value='status=1', shells=None, variant=None)], line_number=392, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: readonly associative array can't be modified (line 392)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'status=0'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -Ar A
E           A['x']=1
E           echo status=$?
E           # just-bash treats readonly assignment as fatal (matches osh)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::associative array and brace expansion[L405]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f3b0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='associative array and brace expansion', script='declare -A A=([k1]=v [k2]=-{a,b}-)\necho ${A["k1"]}\nec..."]}', assertions=[Assertion(type='stdout', value='v\n-{a,b}-', shells=None, variant=None)], line_number=405, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: associative array and brace expansion (line 405)
E           
E           stdout mismatch:
E             expected: 'v\n-{a,b}-'
E             actual:   ''
E           
E           Expected stdout: 'v\n-{a,b}-'
E           Actual stdout:   '\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A=([k1]=v [k2]=-{a,b}-)
E           echo ${A["k1"]}
E           echo ${A["k2"]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::unset -v and assoc array[L436]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f530>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='unset -v and assoc array', script='shopt -s eval_unsafe_arith || true\n\nshow-len() {\n  echo len=${#as...pe='stdout', value='len=1\nlen=0\nlen=1\nlen=0\nlen=1\nlen=0', shells=None, variant=None)], line_number=436, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unset -v and assoc array (line 436)
E           
E           stdout mismatch:
E             expected: 'len=1\nlen=0\nlen=1\nlen=0\nlen=1\nlen=0'
E             actual:   'len=1\nlen=1\nlen=1\nlen=1\nlen=1\nlen=1'
E           
E           Expected stdout: 'len=1\nlen=0\nlen=1\nlen=0\nlen=1\nlen=0'
E           Actual stdout:   'len=1\nlen=1\nlen=1\nlen=1\nlen=1\nlen=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s eval_unsafe_arith || true
E           
E           show-len() {
E             echo len=${#assoc[@]}
E           }
E           
E           declare -A assoc=(['K']=val)
E           show-len
E           
E           unset -v 'assoc["K"]'
E           show-len
E           
E           declare -A assoc=(['K']=val)
E           show-len
E           key=K
E           unset -v 'assoc[$key]'
E           show-len
E           
E           declare -A assoc=(['K']=val)
E           show-len
E           unset -v 'assoc[$(echo K)]'
E           show-len
E           
E           # ${prefix} doesn't work here, even though it does in arithmetic
E           #declare -A assoc=(['K']=val)
E           #show-len
E           #prefix=as
E           #unset -v '${prefix}soc[$key]'
E           #show-len
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::nameref and assoc array[L476]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f5f0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='nameref and assoc array', script='show-values() {\n  echo values: ${A[@]}\n}\n\ndeclare -A A=([\'K\']=v...l2\nvalues: val2\n---\nbefore val2\nafter val3\nvalues: val3', shells=None, variant=None)], line_number=476, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: nameref and assoc array (line 476)
E           
E           stdout mismatch:
E             expected: 'values: val\nbefore val\nafter val2\nvalues: val2\n---\nbefore val2\nafter val3\nvalues: val3'
E             actual:   'values: val\nbefore\nafter\nvalues: val\n---\nbefore\nafter\nvalues: val'
E           
E           Expected stdout: 'values: val\nbefore val\nafter val2\nvalues: val2\n---\nbefore val2\nafter val3\nvalues: val3'
E           Actual stdout:   'values: val\nbefore\nafter\nvalues: val\n---\nbefore\nafter\nvalues: val\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show-values() {
E             echo values: ${A[@]}
E           }
E           
E           declare -A A=(['K']=val)
E           show-values
E           
E           declare -n ref='A["K"]'
E           echo before $ref
E           ref='val2'
E           echo after $ref
E           show-values
E           
E           echo ---
E           
E           key=K
E           declare -n ref='A[$key]'
E           echo before $ref
E           ref='val3'
E           echo after $ref
E           show-values
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::${!ref} and assoc array[L510]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f6b0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='${!ref} and assoc array', script='show-values() {\n  echo values: ${A[@]}\n}\n\ndeclare -A A=([\'K\']=v...ssertion(type='stdout', value='values: val\nref val\nref val', shells=None, variant=None)], line_number=510, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!ref} and assoc array (line 510)
E           
E           stdout mismatch:
E             expected: 'values: val\nref val\nref val'
E             actual:   'values: val\nref\nref'
E           
E           Expected stdout: 'values: val\nref val\nref val'
E           Actual stdout:   'values: val\nref\nref\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show-values() {
E             echo values: ${A[@]}
E           }
E           
E           declare -A A=(['K']=val)
E           show-values
E           
E           declare ref='A["K"]'
E           echo ref ${!ref}
E           
E           key=K
E           declare ref='A[$key]'
E           echo ref ${!ref}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::printf -v and assoc array[L531]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f770>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='printf -v and assoc array', script='show-values() {\n  echo values: ${assoc[@]}\n}\n\ndeclare -A assoc=...'stdout', value='values: val\nvalues: /val2/\nvalues: /val3/', shells=None, variant=None)], line_number=531, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf -v and assoc array (line 531)
E           
E           stdout mismatch:
E             expected: 'values: val\nvalues: /val2/\nvalues: /val3/'
E             actual:   'values: val\nvalues: val\nvalues: val'
E           
E           Expected stdout: 'values: val\nvalues: /val2/\nvalues: /val3/'
E           Actual stdout:   'values: val\nvalues: val\nvalues: val\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show-values() {
E             echo values: ${assoc[@]}
E           }
E           
E           declare -A assoc=(['K']=val)
E           show-values
E           
E           printf -v 'assoc["K"]' '/%s/' val2
E           show-values
E           
E           key=K
E           printf -v 'assoc[$key]' '/%s/' val3
E           show-values
E           
E           # Somehow bash doesn't allow this
E           #prefix=as
E           #printf -v '${prefix}soc[$key]' '/%s/' val4
E           #show-values
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::bash bug: (( A["$key"] = 1 )) doesn't work[L557]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f830>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='bash bug: (( A["$key"] = 1 )) doesn\'t work', script='declare -A A\n#A["$key"]=1\n\n# Works in both\n#A..., variant=None), Assertion(type='stdout', value='[]\n[]', shells=['bash'], variant='BUG')], line_number=557, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bash bug: (( A["$key"] = 1 )) doesn't work (line 557)
E           
E           stdout mismatch:
E             expected: '[]\n[]'
E             actual:   "['0']\n['42']"
E           
E           Expected stdout: '[]\n[]'
E           Actual stdout:   "['0']\n['42']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A
E           #A["$key"]=1
E           
E           # Works in both
E           #A["$key"]=42
E           
E           # Works in bash only
E           #(( A[\$key] = 42 ))
E           
E           (( A["$key"] = 42 ))
E           
E           argv.py "${!A[@]}"
E           argv.py "${A[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::Implicit increment of keys[L581]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909f8f0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='Implicit increment of keys', script='declare -a arr=( [30]=a b [40]=x y)\nargv.py "${!arr[@]}"\nargv.py...dout', value="['30', '31', '40', '41']\n['a', 'b', 'x', 'y']", shells=None, variant=None)], line_number=581, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Implicit increment of keys (line 581)
E           
E           stdout mismatch:
E             expected: "['30', '31', '40', '41']\n['a', 'b', 'x', 'y']"
E             actual:   "['0', '1', '30', '40']\n['b', 'y', 'a', 'x']"
E           
E           Expected stdout: "['30', '31', '40', '41']\n['a', 'b', 'x', 'y']"
E           Actual stdout:   "['0', '1', '30', '40']\n['b', 'y', 'a', 'x']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a arr=( [30]=a b [40]=x y)
E           argv.py "${!arr[@]}"
E           argv.py "${arr[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::test -v with dynamic parsing[L633]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909fa70>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='test -v with dynamic parsing', script="typeset -A assoc\nassoc=([empty]='' [k]=v)\n\nkey=empty\ntest -v...[Assertion(type='stdout', value='empty=0\nk=0\nnonexistent=1', shells=None, variant=None)], line_number=633, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test -v with dynamic parsing (line 633)
E           
E           stdout mismatch:
E             expected: 'empty=0\nk=0\nnonexistent=1'
E             actual:   'empty=1\nk=1\nnonexistent=1'
E           
E           Expected stdout: 'empty=0\nk=0\nnonexistent=1'
E           Actual stdout:   'empty=1\nk=1\nnonexistent=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -A assoc
E           assoc=([empty]='' [k]=v)
E           
E           key=empty
E           test -v 'assoc[$key]'
E           echo empty=$?
E           
E           key=k
E           test -v 'assoc[$key]'
E           echo k=$?
E           
E           key=nonexistent
E           test -v 'assoc[$key]'
E           echo nonexistent=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::BashAssoc a+=()[L743]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909fcb0>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='BashAssoc a+=()', script='declare -A a=([apple]=red [orange]=orange)\na+=([lemon]=yellow [banana]=yello... is red\norange is orange\nlemon is yellow\nbanana is yellow', shells=None, variant=None)], line_number=743, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: BashAssoc a+=() (line 743)
E           
E           stdout mismatch:
E             expected: 'apple is red\norange is orange\nlemon is yellow\nbanana is yellow'
E             actual:   'apple is\norange is\nlemon is\nbanana is'
E           
E           Expected stdout: 'apple is red\norange is orange\nlemon is yellow\nbanana is yellow'
E           Actual stdout:   'apple is \norange is \nlemon is \nbanana is \n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a=([apple]=red [orange]=orange)
E           a+=([lemon]=yellow [banana]=yellow)
E           echo "apple is ${a['apple']}"
E           echo "orange is ${a['orange']}"
E           echo "lemon is ${a['lemon']}"
E           echo "banana is ${a['banana']}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-assoc.test.sh::BashAssoc ${a[@]@Q}[L759]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10909fd70>
test_file = 'array-assoc.test.sh'
test_case = TestCase(name='BashAssoc ${a[@]@Q}', script='declare -A a=()\na[\'symbol1\']=\\\'\\\'\na[\'symbol2\']=\'"\'\na[\'symbo...>&|\']\n[\'[]*?\' \'\'\\\'\'\'\\\'\'\' \'"\' \'()<>&|\']', shells=['bash'], variant='OK')], line_number=759, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: BashAssoc ${a[@]@Q} (line 759)
E           
E           stdout mismatch:
E             expected: '[\'[]*?\' \'\'\\\'\'\'\\\'\'\' \'"\' \'()<>&|\']\n[\'[]*?\' \'\'\\\'\'\'\\\'\'\' \'"\' \'()<>&|\']'
E             actual:   '[$\'\\\'\\\' " ()<>&| []*?\']\n[$\'\\\'\\\' " ()<>&| []*?\']'
E           
E           Expected stdout: '[\'[]*?\' \'\'\\\'\'\'\\\'\'\' \'"\' \'()<>&|\']\n[\'[]*?\' \'\'\\\'\'\'\\\'\'\' \'"\' \'()<>&|\']'
E           Actual stdout:   '[$\'\\\'\\\' " ()<>&| []*?\']\n[$\'\\\'\\\' " ()<>&| []*?\']\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a=()
E           a['symbol1']=\'\'
E           a['symbol2']='"'
E           a['symbol3']='()<>&|'
E           a['symbol4']='[]*?'
E           echo "[${a[@]@Q}]"
E           echo "[${a[*]@Q}]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-compat.test.sh::User arrays decay[L23]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c03b0>
test_file = 'array-compat.test.sh'
test_case = TestCase(name='User arrays decay', script='declare -a a b\na=(x y z)\nb="${a[@]}"  # this collapses to a string\nc=("$...stdout', value="['x', 'y', 'z']\n['x y z']\n['x', 'YYY', 'z']", shells=None, variant=None)], line_number=23, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: User arrays decay (line 23)
E           
E           stdout mismatch:
E             expected: "['x', 'y', 'z']\n['x y z']\n['x', 'YYY', 'z']"
E             actual:   "['x', 'y', 'z']\n['x y z']\n['x y z', 'YYY']"
E           
E           Expected stdout: "['x', 'y', 'z']\n['x y z']\n['x', 'YYY', 'z']"
E           Actual stdout:   "['x', 'y', 'z']\n['x y z']\n['x y z', 'YYY']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a a b
E           a=(x y z)
E           b="${a[@]}"  # this collapses to a string
E           c=("${a[@]}")  # this preserves the array
E           c[1]=YYY  # mutate a copy -- doesn't affect the original
E           argv.py "${a[@]}"
E           argv.py "${b}"
E           argv.py "${c[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-compat.test.sh::++ on a whole array increments the first element (disallowed with strict_array)[L84]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c0830>
test_file = 'array-compat.test.sh'
test_case = TestCase(name='++ on a whole array increments the first element (disallowed with strict_array)', script='shopt -s stri...sh'], variant='OK'), Assertion(type='stdout-json', value='', shells=['osh'], variant='OK')], line_number=84, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ++ on a whole array increments the first element (disallowed with strict_array) (line 84)
E           
E           stdout mismatch:
E             expected: '2 10'
E             actual:   '1 10'
E           
E           Expected stdout: '2 10'
E           Actual stdout:   '1 10\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s strict_array
E           
E           a=(1 10)
E           (( a++ ))  # doesn't make sense
E           echo "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-compat.test.sh::value.BashArray internal representation - Indexed[L106]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c09b0>
test_file = 'array-compat.test.sh'
test_case = TestCase(name='value.BashArray internal representation - Indexed', script='case $SH in mksh) exit ;; esac\n\nz=()\ndec...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=106, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: value.BashArray internal representation - Indexed (line 106)
E           
E           stdout mismatch:
E             expected: 'declare -a z=()\ndeclare -a z=([0]="b" [1]="c")\ndeclare -a z=([0]="b" [1]="c" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b-mystr" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b-mystr" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\n[\'keys\', \'0\', \'1\', \'5\', \'6\', \'7\']\n[\'values\', \'b-mystr\', \'ZZZ-append\', \'d\', \'f\', \'g\']\nstatus=1'
E             actual:   'declare -- z=""\ndeclare -a z=([0]="b" [1]="c")\ndeclare -a z=([0]="b" [1]="c" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\n[\'keys\', \'0\', \'1\', \'5\', \'6\', \'7\']\n[\'values\', \'b\', \'ZZZ-append\', \'d\', \'f\', \'g\']\ndeclare -a z=([0]="b" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\nstatus=0'
E           
E           Expected stdout: 'declare -a z=()\ndeclare -a z=([0]="b" [1]="c")\ndeclare -a z=([0]="b" [1]="c" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b-mystr" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b-mystr" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\n[\'keys\', \'0\', \'1\', \'5\', \'6\', \'7\']\n[\'values\', \'b-mystr\', \'ZZZ-append\', \'d\', \'f\', \'g\']\nstatus=1'
E           Actual stdout:   'declare -- z=""\ndeclare -a z=([0]="b" [1]="c")\ndeclare -a z=([0]="b" [1]="c" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\n[\'keys\', \'0\', \'1\', \'5\', \'6\', \'7\']\n[\'values\', \'b\', \'ZZZ-append\', \'d\', \'f\', \'g\']\ndeclare -a z=([0]="b" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           z=()
E           declare -a | grep z=
E           
E           z+=(b c)
E           declare -a | grep z=
E           
E           # z[5]= finds the index, or puts it in SORTED order I think
E           z[5]=d
E           declare -a | grep z=
E           
E           z[1]=ZZZ
E           declare -a | grep z=
E           
E           # Adds after last index
E           z+=(f g)
E           declare -a | grep z=
E           
E           # This is the equivalent of z[0]+=mystr
E           z+=-mystr
E           declare -a | grep z=
E           
E           z[1]+=-append
E           declare -a | grep z=
E           
E           argv.py keys "${!z[@]}"  # 0 1 5 6 7
E           argv.py values "${z[@]}"
E           
E           # can't do this conversion
E           declare -A z
E           declare -A | grep z=
E           
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-compat.test.sh::value.BashArray internal representation - Assoc (ordering is a problem)[L158]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c0a70>
test_file = 'array-compat.test.sh'
test_case = TestCase(name='value.BashArray internal representation - Assoc (ordering is a problem)', script='case $SH in mksh) exi...ash'], variant='BUG'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=158, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: value.BashArray internal representation - Assoc (ordering is a problem) (line 158)
E           
E           stdout mismatch:
E             expected: 'declare -A A=([k]="v" )\n[\'keys\', \'k\']\n[\'values\', \'v\']'
E             actual:   'declare -A A=([k]="v")\n[\'keys\', \'k\']\n[\'values\', \'v\']'
E           
E           Expected stdout: 'declare -A A=([k]="v" )\n[\'keys\', \'k\']\n[\'values\', \'v\']'
E           Actual stdout:   'declare -A A=([k]="v")\n[\'keys\', \'k\']\n[\'values\', \'v\']\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           declare -A A=([k]=v)
E           declare -A | grep A=
E           
E           argv.py keys "${!A[@]}"
E           argv.py values "${A[@]}"
E           
E           exit
E           
E           # Huh this actually works, we don't support it
E           # Hm the order here is all messed up, in bash 5.2
E           A+=([k2]=v2 [0]=foo [9]=9 [9999]=9999)
E           declare -A | grep A=
E           
E           A+=-append
E           declare -A | grep A=
E           
E           argv.py keys "${!A[@]}"
E           argv.py values "${A[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::Tilde expansions in RHS of [k]=v (BashArray)[L3]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c0b30>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='Tilde expansions in RHS of [k]=v (BashArray)', script='HOME=/home/user\na=([2]=~ [4]=~:~:~)\necho "${a[...='stdout', value='/home/user\n/home/user:/home/user:/home/user', shells=None, variant=None)], line_number=3, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Tilde expansions in RHS of [k]=v (BashArray) (line 3)
E           
E           stdout mismatch:
E             expected: '/home/user\n/home/user:/home/user:/home/user'
E             actual:   '~\n~:~:~'
E           
E           Expected stdout: '/home/user\n/home/user:/home/user:/home/user'
E           Actual stdout:   '~\n~:~:~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           HOME=/home/user
E           a=([2]=~ [4]=~:~:~)
E           echo "${a[2]}"
E           echo "${a[4]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::[k]=$v and [k]="$@" (BashArray)[L44]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c0d70>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='[k]=$v and [k]="$@" (BashArray)', script='i=5\nv=\'1 2 3\'\na=($v [i]=$v)\nprintf \'keys: \'; argv.py "...]\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']", shells=None, variant=None)], line_number=44, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [k]=$v and [k]="$@" (BashArray) (line 44)
E           
E           stdout mismatch:
E             expected: "keys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '1 2 3']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']"
E             actual:   "keys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']"
E           
E           Expected stdout: "keys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '1 2 3']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']"
E           Actual stdout:   "keys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=5
E           v='1 2 3'
E           a=($v [i]=$v)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           
E           x=(3 5 7)
E           a=($v [i]="${x[*]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=($v [i]="${x[@]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=($v [i]=${x[*]})
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=($v [i]=${x[@]})
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::[k]=$v and [k]="$@" (BashAssoc)[L77]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c0e30>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='[k]=$v and [k]="$@" (BashAssoc)', script='i=5\nv=\'1 2 3\'\ndeclare -A a\na=([i]=$v)\nprintf \'keys: \'...]\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']", shells=None, variant=None)], line_number=77, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [k]=$v and [k]="$@" (BashAssoc) (line 77)
E           
E           stdout mismatch:
E             expected: "keys: ['i']\nvals: ['1 2 3']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']"
E             actual:   "keys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []"
E           
E           Expected stdout: "keys: ['i']\nvals: ['1 2 3']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']"
E           Actual stdout:   "keys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=5
E           v='1 2 3'
E           declare -A a
E           a=([i]=$v)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           
E           x=(3 5 7)
E           a=([i]="${x[*]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=([i]="${x[@]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=([i]=${x[*]})
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=([i]=${x[@]})
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::append to element (BashArray)[L111]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c0ef0>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='append to element (BashArray)', script='a=([hello]=1 [hello]+=2)\nprintf \'keys: \'; argv.py "${!a[@]}"...="keys: ['0']\nvals: ['12']\nkeys: ['0']\nvals: ['12:34:56']", shells=None, variant=None)], line_number=111, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: append to element (BashArray) (line 111)
E           
E           stdout mismatch:
E             expected: "keys: ['0']\nvals: ['12']\nkeys: ['0']\nvals: ['12:34:56']"
E             actual:   "keys: ['0', 'hello']\nvals: ['[hello]+=2']\nkeys: ['0', '1', '2', 'hello']\nvals: ['[hello]+=2', '[hello]+=:34', '[hello]+=:56']"
E           
E           Expected stdout: "keys: ['0']\nvals: ['12']\nkeys: ['0']\nvals: ['12:34:56']"
E           Actual stdout:   "keys: ['0', 'hello']\nvals: ['[hello]+=2']\nkeys: ['0', '1', '2', 'hello']\nvals: ['[hello]+=2', '[hello]+=:34', '[hello]+=:56']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=([hello]=1 [hello]+=2)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a+=([hello]+=:34 [hello]+=:56)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::append to element (BashAssoc)[L125]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c0fb0>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='append to element (BashAssoc)', script='declare -A a\nhello=100\na=([hello]=1 [hello]+=2)\nprintf \'key...ello']\nvals: ['2']\nkeys: ['hello']\nvals: ['2:34:56']", shells=['bash'], variant='BUG')], line_number=125, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: append to element (BashAssoc) (line 125)
E           
E           stdout mismatch:
E             expected: "keys: ['hello']\nvals: ['2']\nkeys: ['hello']\nvals: ['2:34:56']"
E             actual:   "keys: ['0', 'hello']\nvals: ['[hello]+=2']\nkeys: ['0', '1', '2', 'hello']\nvals: ['[hello]+=2', '[hello]+=:34', '[hello]+=:56']"
E           
E           Expected stdout: "keys: ['hello']\nvals: ['2']\nkeys: ['hello']\nvals: ['2:34:56']"
E           Actual stdout:   "keys: ['0', 'hello']\nvals: ['[hello]+=2']\nkeys: ['0', '1', '2', 'hello']\nvals: ['[hello]+=2', '[hello]+=:34', '[hello]+=:56']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           hello=100
E           a=([hello]=1 [hello]+=2)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a+=([hello]+=:34 [hello]+=:56)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           # Bash >= 5.1 has a bug. Bash <= 5.0 is OK.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::non-index forms of element (BashAssoc)[L148]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1070>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='non-index forms of element (BashAssoc)', script='declare -A a\na=([j]=1 2 3 4)\necho "status=$?"\nprint...2: a: 4: must use subscript when assigning associative array', shells=None, variant=None)], line_number=148, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: non-index forms of element (BashAssoc) (line 148)
E           
E           stdout mismatch:
E             expected: "status=0\nkeys: ['j']\nvals: ['1']"
E             actual:   "status=0\nkeys: ['0', '1', '2', 'j']\nvals: ['2', '3', '4']"
E           stderr mismatch:
E             expected: 'bash: line 2: a: 2: must use subscript when assigning associative array\nbash: line 2: a: 3: must use subscript when assigning associative array\nbash: line 2: a: 4: must use subscript when assigning associative array'
E             actual:   ''
E           
E           Expected stdout: "status=0\nkeys: ['j']\nvals: ['1']"
E           Actual stdout:   "status=0\nkeys: ['0', '1', '2', 'j']\nvals: ['2', '3', '4']\n"
E           Expected stderr: 'bash: line 2: a: 2: must use subscript when assigning associative array\nbash: line 2: a: 3: must use subscript when assigning associative array\nbash: line 2: a: 4: must use subscript when assigning associative array'
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a=([j]=1 2 3 4)
E           echo "status=$?"
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::Evaluation order (1)[L166]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1130>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='Evaluation order (1)', script='# RHS of [k]=v are expanded when the initializer list is instanciated.  ...', value="keys: ['104', '205', '306']\nvals: ['1', '2', '3']", shells=None, variant=None)], line_number=166, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Evaluation order (1) (line 166)
E           
E           stdout mismatch:
E             expected: "keys: ['104', '205', '306']\nvals: ['1', '2', '3']"
E             actual:   "keys: ['100+i++', '200+i++', '300+i++']\nvals: []"
E           
E           Expected stdout: "keys: ['104', '205', '306']\nvals: ['1', '2', '3']"
E           Actual stdout:   "keys: ['100+i++', '200+i++', '300+i++']\nvals: []\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # RHS of [k]=v are expanded when the initializer list is instanciated.  For the
E           # indexed array, the array indices are evaluated when the array is modified.
E           i=1
E           a=([100+i++]=$((i++)) [200+i++]=$((i++)) [300+i++]=$((i++)))
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::Evaluation order (2)[L178]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c11f0>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='Evaluation order (2)', script='# When evaluating the index, the modification to the array by the previo...alue="keys: ['0', '6', '10']\nvals: ['1+2+3', '10', 'hello']", shells=None, variant=None)], line_number=178, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Evaluation order (2) (line 178)
E           
E           stdout mismatch:
E             expected: "keys: ['0', '6', '10']\nvals: ['1+2+3', '10', 'hello']"
E             actual:   "keys: ['0', '1', '2']\nvals: ['1+2+3', '[a[0]]=10', '[a[6]]=hello']"
E           
E           Expected stdout: "keys: ['0', '6', '10']\nvals: ['1+2+3', '10', 'hello']"
E           Actual stdout:   "keys: ['0', '1', '2']\nvals: ['1+2+3', '[a[0]]=10', '[a[6]]=hello']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # When evaluating the index, the modification to the array by the previous item
E           # of the initializer list is visible to the current item.
E           a=([0]=1+2+3 [a[0]]=10 [a[6]]=hello)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::Evaluation order (3)[L189]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c12b0>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='Evaluation order (3)', script='# RHS should be expanded before any modification to the array.\na=(old1 ...02']\nvals: ['new1', 'new2', 'new3', 'old3', 'old1', 'old2']", shells=None, variant=None)], line_number=189, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Evaluation order (3) (line 189)
E           
E           stdout mismatch:
E             expected: "keys: ['0', '1', '2', '3', '4']\nvals: ['old3', 'old1', 'old2', 'old3', 'old1']\nkeys: ['0', '1', '2', '5', '201', '202']\nvals: ['new1', 'new2', 'new3', 'old3', 'old1', 'old2']"
E             actual:   "keys: ['0', '1', '2', '3', '4']\nvals: ['', '', '', '', '']\nkeys: ['0', '1', '2', '3', '4', '5', '6', '7', '8']\nvals: ['old1', 'old2', 'old3', '[0]=new1', '[1]=new2', '[2]=new3', '[5]=old3', '[a[0]]=old1', '[a[1]]=old2']"
E           
E           Expected stdout: "keys: ['0', '1', '2', '3', '4']\nvals: ['old3', 'old1', 'old2', 'old3', 'old1']\nkeys: ['0', '1', '2', '5', '201', '202']\nvals: ['new1', 'new2', 'new3', 'old3', 'old1', 'old2']"
E           Actual stdout:   "keys: ['0', '1', '2', '3', '4']\nvals: ['', '', '', '', '']\nkeys: ['0', '1', '2', '3', '4', '5', '6', '7', '8']\nvals: ['old1', 'old2', 'old3', '[0]=new1', '[1]=new2', '[2]=new3', '[5]=old3', '[a[0]]=old1', '[a[1]]=old2']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # RHS should be expanded before any modification to the array.
E           a=(old1 old2 old3)
E           a=("${a[2]}" "${a[0]}" "${a[1]}" "${a[2]}" "${a[0]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=(old1 old2 old3)
E           old1=101 old2=102 old3=103
E           new1=201 new2=202 new3=203
E           a+=([0]=new1 [1]=new2 [2]=new3 [5]="${a[2]}" [a[0]]="${a[0]}" [a[1]]="${a[1]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::[k1]=v1 (BashArray)[L208]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1370>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='[k1]=v1 (BashArray)', script='# Note: This and next tests have originally been in "spec/assign.test.sh"...["k2"]}', assertions=[Assertion(type='stdout', value='v2\nv2', shells=None, variant=None)], line_number=208, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [k1]=v1 (BashArray) (line 208)
E           
E           stdout mismatch:
E             expected: 'v2\nv2'
E             actual:   ''
E           
E           Expected stdout: 'v2\nv2'
E           Actual stdout:   '\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Note: This and next tests have originally been in "spec/assign.test.sh" and
E           # compared the behavior of OSH's BashAssoc and Bash's indexed array.  After
E           # supporting "arr=([index]=value)" for indexed arrays, the test was adjusted
E           # and copied here. See also the corresponding tests in "spec/assign.test.sh"
E           a=([k1]=v1 [k2]=v2)
E           echo ${a["k1"]}
E           echo ${a["k2"]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::[k1]=v1 (BashAssoc)[L221]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1430>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='[k1]=v1 (BashAssoc)', script='declare -A a\na=([k1]=v1 [k2]=v2)\necho ${a["k1"]}\necho ${a["k2"]}', assertions=[Assertion(type='stdout', value='v1\nv2', shells=None, variant=None)], line_number=221, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [k1]=v1 (BashAssoc) (line 221)
E           
E           stdout mismatch:
E             expected: 'v1\nv2'
E             actual:   ''
E           
E           Expected stdout: 'v1\nv2'
E           Actual stdout:   '\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a=([k1]=v1 [k2]=v2)
E           echo ${a["k1"]}
E           echo ${a["k2"]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::[k1]=v1 looking like brace expansions (BashAssoc)[L231]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c14f0>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='[k1]=v1 looking like brace expansions (BashAssoc)', script='declare -A a\na=([k2]=-{a,b}-)\necho ${a["k2"]}', assertions=[Assertion(type='stdout', value='-{a,b}-', shells=None, variant=None)], line_number=231, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [k1]=v1 looking like brace expansions (BashAssoc) (line 231)
E           
E           stdout mismatch:
E             expected: '-{a,b}-'
E             actual:   ''
E           
E           Expected stdout: '-{a,b}-'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a=([k2]=-{a,b}-)
E           echo ${a["k2"]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::[k1]=v1 looking like brace expansions (BashArray)[L239]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c15b0>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='[k1]=v1 looking like brace expansions (BashArray)', script='a=([k2]=-{a,b}-)\necho ${a["k2"]}', asserti...variant=None), Assertion(type='stdout', value='[k2]=-a-', shells=['bash'], variant='BUG')], line_number=239, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [k1]=v1 looking like brace expansions (BashArray) (line 239)
E           
E           stdout mismatch:
E             expected: '[k2]=-a-'
E             actual:   ''
E           
E           Expected stdout: '[k2]=-a-'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=([k2]=-{a,b}-)
E           echo ${a["k2"]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-literal.test.sh::BashArray cannot be changed to BashAssoc and vice versa[L249]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1670>
test_file = 'array-literal.test.sh'
test_case = TestCase(name='BashArray cannot be changed to BashAssoc and vice versa', script='declare -a a=(1 2 3 4)\neval \'declar...e="status=1\n['1', '2', '3', '4']\nstatus=1\n['x', 'y', 'z']", shells=None, variant=None)], line_number=249, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: BashArray cannot be changed to BashAssoc and vice versa (line 249)
E           
E           stdout mismatch:
E             expected: "status=1\n['1', '2', '3', '4']\nstatus=1\n['x', 'y', 'z']"
E             actual:   "status=0\n[]\nstatus=0\n['1', '2', '3', '4']"
E           
E           Expected stdout: "status=1\n['1', '2', '3', '4']\nstatus=1\n['x', 'y', 'z']"
E           Actual stdout:   "status=0\n[]\nstatus=0\n['1', '2', '3', '4']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a a=(1 2 3 4)
E           eval 'declare -A a=([a]=x [b]=y [c]=z)'
E           echo status=$?
E           argv.py "${a[@]}"
E           
E           declare -A A=([a]=x [b]=y [c]=z)
E           eval 'declare -a A=(1 2 3 4)'
E           echo status=$?
E           argv.py $(printf '%s\n' "${A[@]}" | sort)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::test "declare -p sp"[L85]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1af0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='test "declare -p sp"', script='a0=()\na1=(1)\na2=(1 2)\na=(x y z w)\na[500]=100\na[1000]=100\n\ncase $S...\ntypeset a[3]=w\ntypeset a[500]=100\ntypeset a[1000]=100', shells=['mksh'], variant='OK')], line_number=85, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test "declare -p sp" (line 85)
E           
E           stdout mismatch:
E             expected: 'declare -a a0=()\ndeclare -a a1=([0]="1")\ndeclare -a a2=([0]="1" [1]="2")\ndeclare -a a=([0]="x" [1]="y" [2]="z" [3]="w" [500]="100" [1000]="100")'
E             actual:   ''
E           
E           Expected stdout: 'declare -a a0=()\ndeclare -a a1=([0]="1")\ndeclare -a a2=([0]="1" [1]="2")\ndeclare -a a=([0]="x" [1]="y" [2]="z" [3]="w" [500]="100" [1000]="100")'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a0=()
E           a1=(1)
E           a2=(1 2)
E           a=(x y z w)
E           a[500]=100
E           a[1000]=100
E           
E           case $SH in
E           bash|mksh)
E             typeset -p a0 a1 a2 a
E             exit ;;
E           esac
E           
E           declare -p a0 a1 a2 a
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::Negative index with a[i]=v[L188]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1d30>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='Negative index with a[i]=v', script='case $SH in mksh) exit ;; esac\n\nsp1[9]=x\ntypeset -p sp1 | sed \...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=188, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Negative index with a[i]=v (line 188)
E           
E           stdout mismatch:
E             expected: 'declare -a sp1=([9]=x)\ndeclare -a sp1=([0]=D [2]=C [6]=B [9]=A)'
E             actual:   'declare -a sp1=([9]=x)\ndeclare -a sp1=([-10]=D [-8]=C [-4]=B [-1]=A [9]=x)'
E           
E           Expected stdout: 'declare -a sp1=([9]=x)\ndeclare -a sp1=([0]=D [2]=C [6]=B [9]=A)'
E           Actual stdout:   'declare -a sp1=([9]=x)\ndeclare -a sp1=([-10]=D [-8]=C [-4]=B [-1]=A [9]=x)\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp1[9]=x
E           typeset -p sp1 | sed 's/"//g'
E           
E           sp1[-1]=A
E           sp1[-4]=B
E           sp1[-8]=C
E           sp1[-10]=D
E           typeset -p sp1 | sed 's/"//g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::a[i]=v with BigInt[L209]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1df0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='a[i]=v with BigInt', script='case $SH in mksh) exit ;; esac\n\nsp1[1]=x\nsp1[5]=y\nsp1[9]=z\n\necho "${...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=209, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: a[i]=v with BigInt (line 209)
E           
E           stdout mismatch:
E             expected: '3\n4\n5\n6'
E             actual:   '3\n3\n3\n3'
E           
E           Expected stdout: '3\n4\n5\n6'
E           Actual stdout:   '3\n3\n3\n3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp1[1]=x
E           sp1[5]=y
E           sp1[9]=z
E           
E           echo "${#sp1[@]}"
E           sp1[0x7FFFFFFFFFFFFFFF]=a
E           echo "${#sp1[@]}"
E           sp1[0x7FFFFFFFFFFFFFFE]=b
E           echo "${#sp1[@]}"
E           sp1[0x7FFFFFFFFFFFFFFD]=c
E           echo "${#sp1[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::Negative out-of-bound index with a[i]=v (1/2)[L236]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1eb0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='Negative out-of-bound index with a[i]=v (1/2)', script='case $SH in mksh) exit ;; esac\n\nsp1[9]=x\nsp1..., variant='N-I'), Assertion(type='stderr-json', value='', shells=['mksh'], variant='N-I')], line_number=236, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Negative out-of-bound index with a[i]=v (1/2) (line 236)
E           
E           stdout mismatch:
E             expected: 'declare -a sp1=([9]="x")'
E             actual:   'declare -a sp1=([-11]="E" [9]="x")'
E           stderr mismatch:
E             expected: 'bash: line 4: sp1[-11]: bad array subscript'
E             actual:   ''
E           
E           Expected stdout: 'declare -a sp1=([9]="x")'
E           Actual stdout:   'declare -a sp1=([-11]="E" [9]="x")\n'
E           Expected stderr: 'bash: line 4: sp1[-11]: bad array subscript'
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp1[9]=x
E           sp1[-11]=E
E           declare -p sp1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::Negative out-of-bound index with a[i]=v (2/2)[L265]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c1f70>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='Negative out-of-bound index with a[i]=v (2/2)', script='case $SH in mksh) exit ;; esac\n\nsp1[9]=x\n\ns..., variant='N-I'), Assertion(type='stderr-json', value='', shells=['mksh'], variant='N-I')], line_number=265, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Negative out-of-bound index with a[i]=v (2/2) (line 265)
E           
E           stdout mismatch:
E             expected: 'declare -a sp1=([9]="x")'
E             actual:   'declare -a sp1=([-21]="F" [9]="x")'
E           stderr mismatch:
E             expected: 'bash: line 5: sp1[-21]: bad array subscript'
E             actual:   ''
E           
E           Expected stdout: 'declare -a sp1=([9]="x")'
E           Actual stdout:   'declare -a sp1=([-21]="F" [9]="x")\n'
E           Expected stderr: 'bash: line 5: sp1[-21]: bad array subscript'
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp1[9]=x
E           
E           sp1[-21]=F
E           declare -p sp1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::unset -v a[i] with out-of-bound negative index[L375]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c21b0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='unset -v a[i] with out-of-bound negative index', script='case $SH in mksh) exit ;; esac\n\na=(1)\n\nuns...ksh'], variant='N-I'), Assertion(type='stderr', value='', shells=['mksh'], variant='N-I')], line_number=375, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unset -v a[i] with out-of-bound negative index (line 375)
E           
E           stderr mismatch:
E             expected: 'bash: line 5: unset: [-2]: bad array subscript\nbash: line 6: unset: [-3]: bad array subscript'
E             actual:   ''
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: 'bash: line 5: unset: [-2]: bad array subscript\nbash: line 6: unset: [-3]: bad array subscript'
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(1)
E           
E           unset -v "a[-2]"
E           unset -v "a[-3]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::unset -v a[i] for max index[L405]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2270>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='unset -v a[i] for max index', script="case $SH in mksh) exit ;; esac\n\na=({1..9})\nunset -v 'a[-1]'\na...bash'], variant='OK'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=405, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unset -v a[i] for max index (line 405)
E           
E           stdout mismatch:
E             expected: 'declare -a a=([0]="1" [1]="2" [2]="3" [3]="4" [4]="5" [5]="6" [6]="7" [7]="x")\ndeclare -a a=([0]="1" [1]="2" [2]="3" [3]="4" [4]="5" [5]="6" [6]="x")'
E             actual:   'declare -a a=([-1]="x" [0]="{1..9}")\ndeclare -a a=([-1]="x" [0]="{1..9}")'
E           
E           Expected stdout: 'declare -a a=([0]="1" [1]="2" [2]="3" [3]="4" [4]="5" [5]="6" [6]="7" [7]="x")\ndeclare -a a=([0]="1" [1]="2" [2]="3" [3]="4" [4]="5" [5]="6" [6]="x")'
E           Actual stdout:   'declare -a a=([-1]="x" [0]="{1..9}")\ndeclare -a a=([-1]="x" [0]="{1..9}")\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=({1..9})
E           unset -v 'a[-1]'
E           a[-1]=x
E           declare -p a
E           unset -v 'a[-1]'
E           a[-1]=x
E           declare -p a
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::[[ -v a[i] ]][L430]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2330>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='[[ -v a[i] ]]', script='case $SH in mksh) exit ;; esac\n\nsp1=()\n[[ -v sp1[0] ]]; echo "$? (expect 1)"...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=430, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ -v a[i] ]] (line 430)
E           
E           stdout mismatch:
E             expected: '1 (expect 1)\n1 (expect 1)\n0 (expect 0)\n0 (expect 0)\n1 (expect 1)\n0 (expect 0)\n0 (expect 0)\n0 (expect 0)\n0 (expect 0)\n1 (expect 1)\n0 (expect 0)\n0 (expect 0)\n0 (expect 0)\n1 (expect 1)\n0 (expect 0)\n0 (expect 0)'
E             actual:   '1 (expect 1)\n1 (expect 1)\n0 (expect 0)\n1 (expect 0)\n1 (expect 1)\n1 (expect 0)\n1 (expect 0)\n1 (expect 0)\n1 (expect 0)\n1 (expect 1)\n1 (expect 0)\n1 (expect 0)\n1 (expect 0)\n1 (expect 1)\n1 (expect 0)\n1 (expect 0)'
E           
E           Expected stdout: '1 (expect 1)\n1 (expect 1)\n0 (expect 0)\n0 (expect 0)\n1 (expect 1)\n0 (expect 0)\n0 (expect 0)\n0 (expect 0)\n0 (expect 0)\n1 (expect 1)\n0 (expect 0)\n0 (expect 0)\n0 (expect 0)\n1 (expect 1)\n0 (expect 0)\n0 (expect 0)'
E           Actual stdout:   '1 (expect 1)\n1 (expect 1)\n0 (expect 0)\n1 (expect 0)\n1 (expect 1)\n1 (expect 0)\n1 (expect 0)\n1 (expect 0)\n1 (expect 0)\n1 (expect 1)\n1 (expect 0)\n1 (expect 0)\n1 (expect 0)\n1 (expect 1)\n1 (expect 0)\n1 (expect 0)\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp1=()
E           [[ -v sp1[0] ]]; echo "$? (expect 1)"
E           [[ -v sp1[9] ]]; echo "$? (expect 1)"
E           
E           sp2=({1..9})
E           [[ -v sp2[0] ]]; echo "$? (expect 0)"
E           [[ -v sp2[8] ]]; echo "$? (expect 0)"
E           [[ -v sp2[9] ]]; echo "$? (expect 1)"
E           [[ -v sp2[-1] ]]; echo "$? (expect 0)"
E           [[ -v sp2[-2] ]]; echo "$? (expect 0)"
E           [[ -v sp2[-9] ]]; echo "$? (expect 0)"
E           
E           sp3=({1..9})
E           unset -v 'sp3[4]'
E           [[ -v sp3[3] ]]; echo "$? (expect 0)"
E           [[ -v sp3[4] ]]; echo "$? (expect 1)"
E           [[ -v sp3[5] ]]; echo "$? (expect 0)"
E           [[ -v sp3[-1] ]]; echo "$? (expect 0)"
E           [[ -v sp3[-4] ]]; echo "$? (expect 0)"
E           [[ -v sp3[-5] ]]; echo "$? (expect 1)"
E           [[ -v sp3[-6] ]]; echo "$? (expect 0)"
E           [[ -v sp3[-9] ]]; echo "$? (expect 0)"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::[[ -v a[i] ]] with invalid negative index[L479]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c23f0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='[[ -v a[i] ]] with invalid negative index', script='case $SH in mksh) exit ;; esac\n\nsp1=()\n([[ -v sp..., variant='N-I'), Assertion(type='stderr-json', value='', shells=['mksh'], variant='N-I')], line_number=479, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ -v a[i] ]] with invalid negative index (line 479)
E           
E           stderr mismatch:
E             expected: 'bash: line 4: sp1: bad array subscript\nbash: line 6: sp2: bad array subscript\nbash: line 9: sp3: bad array subscript'
E             actual:   ''
E           
E           Expected stdout: '1 (expect 1)\n1 (expect 1)\n1 (expect 1)'
E           Actual stdout:   '1 (expect 1)\n1 (expect 1)\n1 (expect 1)\n'
E           Expected stderr: 'bash: line 4: sp1: bad array subscript\nbash: line 6: sp2: bad array subscript\nbash: line 9: sp3: bad array subscript'
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp1=()
E           ([[ -v sp1[-1] ]]; echo "$? (expect 1)")
E           sp2=({1..9})
E           ([[ -v sp2[-10] ]]; echo "$? (expect 1)")
E           sp3=({1..9})
E           unset -v 'sp3[4]'
E           ([[ -v sp3[-10] ]]; echo "$? (expect 1)")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::((sp[i])) and ((sp[i]++)) with invalid negative index[L568]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2570>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='((sp[i])) and ((sp[i]++)) with invalid negative index', script="case $SH in mksh) exit ;; esac\n\na=({1...ksh'], variant='N-I'), Assertion(type='stderr', value='', shells=['mksh'], variant='N-I')], line_number=568, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ((sp[i])) and ((sp[i]++)) with invalid negative index (line 568)
E           
E           stderr mismatch:
E             expected: 'bash: line 6: a: bad array subscript'
E             actual:   ''
E           
E           Expected stdout: '0'
E           Actual stdout:   '0\n'
E           Expected stderr: 'bash: line 6: a: bad array subscript'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=({1..9})
E           unset -v 'a[2]' 'a[3]' 'a[7]'
E           
E           echo $((a[-10]))
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${sp[i]}[L595]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2630>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${sp[i]}', script='case $SH in mksh) exit ;; esac\n\nsp=({1..9})\nunset -v \'sp[2]\'\nunset -v \'sp[3]\...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=595, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${sp[i]} (line 595)
E           
E           stdout mismatch:
E             expected: "sp[0]: '1', 1, set.\nsp[1]: '2', 2, set.\nsp[8]: '9', 9, set.\nsp[2]: '', (empty), .\nsp[3]: '', (empty), .\nsp[7]: '', (empty), .\nsp[-1]: '9'.\nsp[-2]: ''.\nsp[-3]: '7'.\nsp[-4]: '6'.\nsp[-9]: '1'."
E             actual:   "sp[0]: '{1..9}', (empty), .\nsp[1]: '', (empty), .\nsp[8]: '', (empty), .\nsp[2]: '', (empty), .\nsp[3]: '', (empty), .\nsp[7]: '', (empty), .\nsp[-1]: '{1..9}'.\nsp[-2]: ''.\nsp[-3]: ''.\nsp[-4]: ''.\nsp[-9]: ''."
E           
E           Expected stdout: "sp[0]: '1', 1, set.\nsp[1]: '2', 2, set.\nsp[8]: '9', 9, set.\nsp[2]: '', (empty), .\nsp[3]: '', (empty), .\nsp[7]: '', (empty), .\nsp[-1]: '9'.\nsp[-2]: ''.\nsp[-3]: '7'.\nsp[-4]: '6'.\nsp[-9]: '1'."
E           Actual stdout:   "sp[0]: '{1..9}', (empty), .\nsp[1]: '', (empty), .\nsp[8]: '', (empty), .\nsp[2]: '', (empty), .\nsp[3]: '', (empty), .\nsp[7]: '', (empty), .\nsp[-1]: '{1..9}'.\nsp[-2]: ''.\nsp[-3]: ''.\nsp[-4]: ''.\nsp[-9]: ''.\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp=({1..9})
E           unset -v 'sp[2]'
E           unset -v 'sp[3]'
E           unset -v 'sp[7]'
E           
E           echo "sp[0]: '${sp[0]}', ${sp[0]:-(empty)}, ${sp[0]+set}."
E           echo "sp[1]: '${sp[1]}', ${sp[1]:-(empty)}, ${sp[1]+set}."
E           echo "sp[8]: '${sp[8]}', ${sp[8]:-(empty)}, ${sp[8]+set}."
E           echo "sp[2]: '${sp[2]}', ${sp[2]:-(empty)}, ${sp[2]+set}."
E           echo "sp[3]: '${sp[3]}', ${sp[3]:-(empty)}, ${sp[3]+set}."
E           echo "sp[7]: '${sp[7]}', ${sp[7]:-(empty)}, ${sp[7]+set}."
E           
E           echo "sp[-1]: '${sp[-1]}'."
E           echo "sp[-2]: '${sp[-2]}'."
E           echo "sp[-3]: '${sp[-3]}'."
E           echo "sp[-4]: '${sp[-4]}'."
E           echo "sp[-9]: '${sp[-9]}'."
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${sp[i]} with negative invalid index[L634]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c26f0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${sp[i]} with negative invalid index', script='case $SH in mksh) exit ;; esac\n\nsp=({1..9})\nunset -v ...ksh'], variant='N-I'), Assertion(type='stderr', value='', shells=['mksh'], variant='N-I')], line_number=634, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${sp[i]} with negative invalid index (line 634)
E           
E           stderr mismatch:
E             expected: 'bash: line 8: sp: bad array subscript\nbash: line 9: sp: bad array subscript\nbash: line 10: sp: bad array subscript'
E             actual:   ''
E           
E           Expected stdout: "sp[-10]: ''.\nsp[-11]: ''.\nsp[-19]: ''."
E           Actual stdout:   "sp[-10]: ''.\nsp[-11]: ''.\nsp[-19]: ''.\n"
E           Expected stderr: 'bash: line 8: sp: bad array subscript\nbash: line 9: sp: bad array subscript\nbash: line 10: sp: bad array subscript'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp=({1..9})
E           unset -v 'sp[2]'
E           unset -v 'sp[3]'
E           unset -v 'sp[7]'
E           
E           echo "sp[-10]: '${sp[-10]}'."
E           echo "sp[-11]: '${sp[-11]}'."
E           echo "sp[-19]: '${sp[-19]}'."
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${a[@]:offset:length}[L675]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c27b0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${a[@]:offset:length}', script='case $SH in mksh) exit ;; esac\n\na=(v{0..9})\nunset -v \'a[2]\' \'a[3]...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=675, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${a[@]:offset:length} (line 675)
E           
E           stdout mismatch:
E             expected: '==== ${a[@]:offset} ====\n[v0 v1 v5 v6 v8 v9][v0 v1 v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v9][v9]\n[][]\n[][]\n==== ${a[@]:negative} ====\n[v9][v9]\n[v8 v9][v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v1 v5 v6 v8 v9][v1 v5 v6 v8 v9]\n[v0 v1 v5 v6 v8 v9][v0 v1 v5 v6 v8 v9]\n[][]\n[][]\n==== ${a[@]:offset:length} ====\n[][]\n[v0][v0]\n[v0 v1 v5][v0 v1 v5]\n[v5][v5]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[][]\n[v9][v9]\n[v9][v9]\n[][]'
E             actual:   '==== ${a[@]:offset} ====\n[v{0..9}][v{0..9}]\n[][]\n[][]\n[][]\n[][]\n[][]\n[][]\n==== ${a[@]:negative} ====\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n==== ${a[@]:offset:length} ====\n[][]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[][]\n[][]\n[][]\n[][]\n[][]\n[][]\n[][]\n[][]'
E           
E           Expected stdout: '==== ${a[@]:offset} ====\n[v0 v1 v5 v6 v8 v9][v0 v1 v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v9][v9]\n[][]\n[][]\n==== ${a[@]:negative} ====\n[v9][v9]\n[v8 v9][v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v1 v5 v6 v8 v9][v1 v5 v6 v8 v9]\n[v0 v1 v5 v6 v8 v9][v0 v1 v5 v6 v8 v9]\n[][]\n[][]\n==== ${a[@]:offset:length} ====\n[][]\n[v0][v0]\n[v0 v1 v5][v0 v1 v5]\n[v5][v5]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[v5 v6 v8 v9][v5 v6 v8 v9]\n[][]\n[v9][v9]\n[v9][v9]\n[][]'
E           Actual stdout:   '==== ${a[@]:offset} ====\n[v{0..9}][v{0..9}]\n[][]\n[][]\n[][]\n[][]\n[][]\n[][]\n==== ${a[@]:negative} ====\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n==== ${a[@]:offset:length} ====\n[][]\n[v{0..9}][v{0..9}]\n[v{0..9}][v{0..9}]\n[][]\n[][]\n[][]\n[][]\n[][]\n[][]\n[][]\n[][]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(v{0..9})
E           unset -v 'a[2]' 'a[3]' 'a[4]' 'a[7]'
E           
E           echo '==== ${a[@]:offset} ===='
E           echo "[${a[@]:0}][${a[*]:0}]"
E           echo "[${a[@]:2}][${a[*]:2}]"
E           echo "[${a[@]:3}][${a[*]:3}]"
E           echo "[${a[@]:5}][${a[*]:5}]"
E           echo "[${a[@]:9}][${a[*]:9}]"
E           echo "[${a[@]:10}][${a[*]:10}]"
E           echo "[${a[@]:11}][${a[*]:11}]"
E           
E           echo '==== ${a[@]:negative} ===='
E           echo "[${a[@]: -1}][${a[*]: -1}]"
E           echo "[${a[@]: -2}][${a[*]: -2}]"
E           echo "[${a[@]: -5}][${a[*]: -5}]"
E           echo "[${a[@]: -9}][${a[*]: -9}]"
E           echo "[${a[@]: -10}][${a[*]: -10}]"
E           echo "[${a[@]: -11}][${a[*]: -11}]"
E           echo "[${a[@]: -21}][${a[*]: -21}]"
E           
E           echo '==== ${a[@]:offset:length} ===='
E           echo "[${a[@]:0:0}][${a[*]:0:0}]"
E           echo "[${a[@]:0:1}][${a[*]:0:1}]"
E           echo "[${a[@]:0:3}][${a[*]:0:3}]"
E           echo "[${a[@]:2:1}][${a[*]:2:1}]"
E           echo "[${a[@]:2:4}][${a[*]:2:4}]"
E           echo "[${a[@]:3:4}][${a[*]:3:4}]"
E           echo "[${a[@]:5:4}][${a[*]:5:4}]"
E           echo "[${a[@]:5:0}][${a[*]:5:0}]"
E           echo "[${a[@]:9:1}][${a[*]:9:1}]"
E           echo "[${a[@]:9:2}][${a[*]:9:2}]"
E           echo "[${a[@]:10:1}][${a[*]:10:1}]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${@:offset:length}[L747]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2870>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${@:offset:length}', script='case $SH in mksh) exit ;; esac\n\nset -- v{1..9}\n\n{\n  echo \'==== ${@:o...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=747, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${@:offset:length} (line 747)
E           
E           stdout mismatch:
E             expected: '==== ${@:offset:length} ====\n[$SH v1 v2][$SH v1 v2]\n[v1 v2 v3][v1 v2 v3]\n[v3 v4 v5][v3 v4 v5]\n[v5 v6 v7 v8 v9][v5 v6 v7 v8 v9]\n==== ${@:negative} ====\n[v9][v9]\n[v7 v8 v9][v7 v8 v9]\n[v1 v2 v3 v4 v5 v6 v7 v8 v9][v1 v2 v3 v4 v5 v6 v7 v8 v9]\n[$SH v1 v2 v3 v4 v5 v6 v7 v8 v9][$SH v1 v2 v3 v4 v5 v6 v7 v8 v9]\n[][]\n[v7 v8][v7 v8]\n[v1 v2 v3 v4][v1 v2 v3 v4]\n[$SH v1 v2 v3][$SH v1 v2 v3]\n[][]'
E             actual:   '==== ${@:offset:length} ====\n[v1 ][v1 ]\n[1 v][1 v]\n[v2 ][v2 ]\n[ v3 v4 v5 ][ v3 v4 v5 ]\n==== ${@:negative} ====\n[9][9]\n[ v9][ v9]\n[ v7 v8 v9][ v7 v8 v9]\n[6 v7 v8 v9][6 v7 v8 v9]\n[v6 v7 v8 v9][v6 v7 v8 v9]\n[ v][ v]\n[ v7 ][ v7 ]\n[6 v7][6 v7]\n[v6 v][v6 v]'
E           
E           Expected stdout: '==== ${@:offset:length} ====\n[$SH v1 v2][$SH v1 v2]\n[v1 v2 v3][v1 v2 v3]\n[v3 v4 v5][v3 v4 v5]\n[v5 v6 v7 v8 v9][v5 v6 v7 v8 v9]\n==== ${@:negative} ====\n[v9][v9]\n[v7 v8 v9][v7 v8 v9]\n[v1 v2 v3 v4 v5 v6 v7 v8 v9][v1 v2 v3 v4 v5 v6 v7 v8 v9]\n[$SH v1 v2 v3 v4 v5 v6 v7 v8 v9][$SH v1 v2 v3 v4 v5 v6 v7 v8 v9]\n[][]\n[v7 v8][v7 v8]\n[v1 v2 v3 v4][v1 v2 v3 v4]\n[$SH v1 v2 v3][$SH v1 v2 v3]\n[][]'
E           Actual stdout:   '==== ${@:offset:length} ====\n[v1 ][v1 ]\n[1 v][1 v]\n[v2 ][v2 ]\n[ v3 v4 v5 ][ v3 v4 v5 ]\n==== ${@:negative} ====\n[9][9]\n[ v9][ v9]\n[ v7 v8 v9][ v7 v8 v9]\n[6 v7 v8 v9][6 v7 v8 v9]\n[v6 v7 v8 v9][v6 v7 v8 v9]\n[ v][ v]\n[ v7 ][ v7 ]\n[6 v7][6 v7]\n[v6 v][v6 v]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           set -- v{1..9}
E           
E           {
E             echo '==== ${@:offset:length} ===='
E             echo "[${*:0:3}][${*:0:3}]"
E             echo "[${*:1:3}][${*:1:3}]"
E             echo "[${*:3:3}][${*:3:3}]"
E             echo "[${*:5:10}][${*:5:10}]"
E           
E             echo '==== ${@:negative} ===='
E             echo "[${*: -1}][${*: -1}]"
E             echo "[${*: -3}][${*: -3}]"
E             echo "[${*: -9}][${*: -9}]"
E             echo "[${*: -10}][${*: -10}]"
E             echo "[${*: -11}][${*: -11}]"
E             echo "[${*: -3:2}][${*: -3:2}]"
E             echo "[${*: -9:4}][${*: -9:4}]"
E             echo "[${*: -10:4}][${*: -10:4}]"
E             echo "[${*: -11:4}][${*: -11:4}]"
E           } | sed "s:$SH:\$SH:g;s:${SH##*/}:\$SH:g"
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${a[@]}[L848]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c29f0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${a[@]}', script='a=(v{0,1,2,3,4,5,6,7,8,9})\nunset -v \'a[2]\' \'a[3]\' \'a[4]\' \'a[7]\'\n\nargv.py "...v6', 'v8', 'v9']\n['abcv0', 'v1', 'v5', 'v6', 'v8', 'v9xyz']", shells=None, variant=None)], line_number=848, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${a[@]} (line 848)
E           
E           stdout mismatch:
E             expected: "['v0', 'v1', 'v5', 'v6', 'v8', 'v9']\n['abcv0', 'v1', 'v5', 'v6', 'v8', 'v9xyz']"
E             actual:   "['v{0,1,2,3,4,5,6,7,8,9}']\n['abcv{0,1,2,3,4,5,6,7,8,9}xyz']"
E           
E           Expected stdout: "['v0', 'v1', 'v5', 'v6', 'v8', 'v9']\n['abcv0', 'v1', 'v5', 'v6', 'v8', 'v9xyz']"
E           Actual stdout:   "['v{0,1,2,3,4,5,6,7,8,9}']\n['abcv{0,1,2,3,4,5,6,7,8,9}xyz']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(v{0,1,2,3,4,5,6,7,8,9})
E           unset -v 'a[2]' 'a[3]' 'a[4]' 'a[7]'
E           
E           argv.py "${a[@]}"
E           argv.py "abc${a[@]}xyz"
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${a[@]#...}[L861]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2ab0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${a[@]#...}', script='case $SH in mksh) exit ;; esac\n\na=(v{0..9})\nunset -v \'a[2]\' \'a[3]\' \'a[4]\...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=861, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${a[@]#...} (line 861)
E           
E           stdout mismatch:
E             expected: "['0', '1', '5', '6', '8', '9']\n['abc0', '1', '5', '6', '8', '9xyz']\n['v', 'v', 'v', 'v6', 'v8', 'v9']\n['abcv', 'v', 'v', 'v6', 'v8', 'v9xyz']\n['', '', '', '', '', '']"
E             actual:   "['{0..9}']\n['abc{0..9}xyz']\n['v{0..9}']\n['abcv{0..9}xyz']\n['0..9}']"
E           
E           Expected stdout: "['0', '1', '5', '6', '8', '9']\n['abc0', '1', '5', '6', '8', '9xyz']\n['v', 'v', 'v', 'v6', 'v8', 'v9']\n['abcv', 'v', 'v', 'v6', 'v8', 'v9xyz']\n['', '', '', '', '', '']"
E           Actual stdout:   "['{0..9}']\n['abc{0..9}xyz']\n['v{0..9}']\n['abcv{0..9}xyz']\n['0..9}']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(v{0..9})
E           unset -v 'a[2]' 'a[3]' 'a[4]' 'a[7]'
E           
E           argv.py "${a[@]#v}"
E           argv.py "abc${a[@]#v}xyz"
E           argv.py "${a[@]%[0-5]}"
E           argv.py "abc${a[@]%[0-5]}xyz"
E           argv.py "${a[@]#v?}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${a[@]/pat/rep}[L885]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2b70>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${a[@]/pat/rep}', script='case $SH in mksh) exit ;; esac\n\na=(v{0..9})\nunset -v \'a[2]\' \'a[3]\' \'a...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=885, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${a[@]/pat/rep} (line 885)
E           
E           stdout mismatch:
E             expected: "['0', '1', '5', '6', '8', '9']\n['', '', '', '', '', '']\n['0', '1', '5', '6', '8', '9']\n['v', 'v', 'v', 'v', 'v', 'v']\n['x0', 'x1', 'x5', 'x6', 'x8', 'x9']\n['x0', 'x1', 'x5', 'x6', 'x8', 'x9']\n['vD', 'vD', 'vD', 'v6', 'v8', 'v9']\n['_0', '_1', '_5', '__', '__', '__']"
E             actual:   "['{0..9}']\n['']\n['{0..9}']\n['v{0..9']\n['x{0..9}']\n['x{0..9}']\n['v{D..9}']\n['__0____']"
E           
E           Expected stdout: "['0', '1', '5', '6', '8', '9']\n['', '', '', '', '', '']\n['0', '1', '5', '6', '8', '9']\n['v', 'v', 'v', 'v', 'v', 'v']\n['x0', 'x1', 'x5', 'x6', 'x8', 'x9']\n['x0', 'x1', 'x5', 'x6', 'x8', 'x9']\n['vD', 'vD', 'vD', 'v6', 'v8', 'v9']\n['_0', '_1', '_5', '__', '__', '__']"
E           Actual stdout:   "['{0..9}']\n['']\n['{0..9}']\n['v{0..9']\n['x{0..9}']\n['x{0..9}']\n['v{D..9}']\n['__0____']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(v{0..9})
E           unset -v 'a[2]' 'a[3]' 'a[4]' 'a[7]'
E           
E           argv.py "${a[@]/?}"
E           argv.py "${a[@]//?}"
E           argv.py "${a[@]/#?}"
E           argv.py "${a[@]/%?}"
E           
E           argv.py "${a[@]/v/x}"
E           argv.py "${a[@]//v/x}"
E           argv.py "${a[@]/[0-5]/D}"
E           argv.py "${a[@]//[!0-5]/_}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${a[@]@P}, ${a[@]@Q}, and ${a[@]@a}[L917]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2c30>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${a[@]@P}, ${a[@]@Q}, and ${a[@]@a}', script='case $SH in mksh) exit ;; esac\n\na=(v{0..9})\nunset -v \...bash'], variant='OK'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=917, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${a[@]@P}, ${a[@]@Q}, and ${a[@]@a} (line 917)
E           
E           stdout mismatch:
E             expected: '[\'v0\', \'v1\', \'v5\', \'v6\', \'v8\', \'v9\']\n[\'v0 v1 v5 v6 v8 v9\']\n["\'v0\'", "\'v1\'", "\'v5\'", "\'v6\'", "\'v8\'", "\'v9\'"]\n["\'v0\' \'v1\' \'v5\' \'v6\' \'v8\' \'v9\'"]\n[\'a\', \'a\', \'a\', \'a\', \'a\', \'a\']\n[\'a a a a a a\']'
E             actual:   '[\'v{0..9}\']\n[\'v{0..9}\']\n["\'v{0..9}\'"]\n["\'v{0..9}\'"]\n[\'\']\n[\'\']'
E           
E           Expected stdout: '[\'v0\', \'v1\', \'v5\', \'v6\', \'v8\', \'v9\']\n[\'v0 v1 v5 v6 v8 v9\']\n["\'v0\'", "\'v1\'", "\'v5\'", "\'v6\'", "\'v8\'", "\'v9\'"]\n["\'v0\' \'v1\' \'v5\' \'v6\' \'v8\' \'v9\'"]\n[\'a\', \'a\', \'a\', \'a\', \'a\', \'a\']\n[\'a a a a a a\']'
E           Actual stdout:   '[\'v{0..9}\']\n[\'v{0..9}\']\n["\'v{0..9}\'"]\n["\'v{0..9}\'"]\n[\'\']\n[\'\']\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(v{0..9})
E           unset -v 'a[2]' 'a[3]' 'a[4]' 'a[7]'
E           
E           argv.py "${a[@]@P}"
E           argv.py "${a[*]@P}"
E           argv.py "${a[@]@Q}"
E           argv.py "${a[*]@Q}"
E           argv.py "${a[@]@a}"
E           argv.py "${a[*]@a}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${a-}[L974]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2db0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${a-}', script='a1=()\na2=("" "")\na3=(foo bar)\n\necho "$a1, ${a1-(unset)}, ${a1:-(empty)};"\necho "$a...t', value=', (unset), (empty);\n, , (empty);\nfoo, foo, foo;', shells=None, variant=None)], line_number=974, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${a-} (line 974)
E           
E           stdout mismatch:
E             expected: ', (unset), (empty);\n, , (empty);\nfoo, foo, foo;'
E             actual:   ', , (empty);\n, , (empty);\nfoo, foo, foo;'
E           
E           Expected stdout: ', (unset), (empty);\n, , (empty);\nfoo, foo, foo;'
E           Actual stdout:   ', , (empty);\n, , (empty);\nfoo, foo, foo;\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a1=()
E           a2=("" "")
E           a3=(foo bar)
E           
E           echo "$a1, ${a1-(unset)}, ${a1:-(empty)};"
E           echo "$a2, ${a2-(unset)}, ${a2:-(empty)};"
E           echo "$a3, ${a3-(unset)}, ${a3:-(empty)};"
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[array-sparse.test.sh::${!a[@]}[L1006]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2f30>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='${!a[@]}', script='case $SH in mksh) exit ;; esac\n\na=(v{0..9})\nunset -v \'a[3]\' \'a[4]\' \'a[7]\' \...=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=1006, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!a[@]} (line 1006)
E           
E           stdout mismatch:
E             expected: "['0', '1', '2', '5', '6', '8']"
E             actual:   "['0']"
E           
E           Expected stdout: "['0', '1', '2', '5', '6', '8']"
E           Actual stdout:   "['0']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(v{0..9})
E           unset -v 'a[3]' 'a[4]' 'a[7]' 'a[9]'
E           
E           argv.py "${!a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[array-sparse.test.sh::"${a[*]}"[L1022]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c2ff0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='"${a[*]}"', script='a=(v{0,1,2,3,4,5,6,7,8,9})\nunset -v \'a[3]\' \'a[4]\' \'a[7]\' \'a[9]\'\n\necho "$..., value='v0 v1 v2 v5 v6 v8\nv0v1v2v5v6v8\nv0/v1/v2/v5/v6/v8', shells=None, variant=None)], line_number=1022, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: "${a[*]}" (line 1022)
E           
E           stdout mismatch:
E             expected: 'v0 v1 v2 v5 v6 v8\nv0v1v2v5v6v8\nv0/v1/v2/v5/v6/v8'
E             actual:   'v{0,1,2,3,4,5,6,7,8,9}\nv{0,1,2,3,4,5,6,7,8,9}\nv{0,1,2,3,4,5,6,7,8,9}'
E           
E           Expected stdout: 'v0 v1 v2 v5 v6 v8\nv0v1v2v5v6v8\nv0/v1/v2/v5/v6/v8'
E           Actual stdout:   'v{0,1,2,3,4,5,6,7,8,9}\nv{0,1,2,3,4,5,6,7,8,9}\nv{0,1,2,3,4,5,6,7,8,9}\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(v{0,1,2,3,4,5,6,7,8,9})
E           unset -v 'a[3]' 'a[4]' 'a[7]' 'a[9]'
E           
E           echo "${a[*]}"
E           IFS=
E           echo "${a[*]}"
E           IFS=/
E           echo "${a[*]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::compgen -F _set_COMPREPLY[L1039]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c30b0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='compgen -F _set_COMPREPLY', script="case $SH in mksh) exit ;; esac\n\n_set_COMPREPLY() {\n  COMPREPLY=(...=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=1039, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -F _set_COMPREPLY (line 1039)
E           
E           stdout mismatch:
E             expected: '0\n1\n3\n5\n7\n8\n9'
E             actual:   ''
E           
E           Expected stdout: '0\n1\n3\n5\n7\n8\n9'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           _set_COMPREPLY() {
E             COMPREPLY=({0..9})
E             unset -v 'COMPREPLY[2]' 'COMPREPLY[4]' 'COMPREPLY[6]'
E           }
E           
E           compgen -F _set_COMPREPLY
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::Regression: a[-1]=1[L1202]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c35f0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='Regression: a[-1]=1', script='case $SH in mksh) exit 99 ;; esac\n\na[-1]=1', assertions=[Assertion(type... variant='N-I'), Assertion(type='stderr-json', value='', shells=['mksh'], variant='N-I')], line_number=1202, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: a[-1]=1 (line 1202)
E           
E           stderr mismatch:
E             expected: 'bash: line 3: a[-1]: bad array subscript'
E             actual:   ''
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: 'bash: line 3: a[-1]: bad array subscript'
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit 99 ;; esac
E           
E           a[-1]=1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array-sparse.test.sh::Initializing indexed array with ([index]=value)[L1222]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c36b0>
test_file = 'array-sparse.test.sh'
test_case = TestCase(name='Initializing indexed array with ([index]=value)', script='case $SH in mksh) exit 99 ;; esac\ndeclare -a... variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=1222, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Initializing indexed array with ([index]=value) (line 1222)
E           
E           stdout mismatch:
E             expected: "status=0\n['3']"
E             actual:   'status=0\n[]'
E           
E           Expected stdout: "status=0\n['3']"
E           Actual stdout:   'status=0\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit 99 ;; esac
E           declare -a a=([xx]=1 [yy]=2 [zz]=3)
E           echo status=$?
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[array.test.sh::local array[L19]] _______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c3830>
test_file = 'array.test.sh'
test_case = TestCase(name='local array', script='# mksh support local variables, but not local arrays, oddly.\nf() {\n  local a=(1...], variant='BUG'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='BUG')], line_number=19, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: local array (line 19)
E           
E           stdout mismatch:
E             expected: "['1']"
E             actual:   "['']"
E           
E           Expected stdout: "['1']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh support local variables, but not local arrays, oddly.
E           f() {
E             local a=(1 '2 3')
E             argv.py "${a[0]}"
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Command with with word splitting in array[L31]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c38f0>
test_file = 'array.test.sh'
test_case = TestCase(name='Command with with word splitting in array', script='array=(\'1 2\' $(echo \'3 4\'))\nargv.py "${array[@...assertions=[Assertion(type='stdout', value="['1 2', '3', '4']", shells=None, variant=None)], line_number=31, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Command with with word splitting in array (line 31)
E           
E           stdout mismatch:
E             expected: "['1 2', '3', '4']"
E             actual:   "['1 2', '3 4']"
E           
E           Expected stdout: "['1 2', '3', '4']"
E           Actual stdout:   "['1 2', '3 4']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           array=('1 2' $(echo '3 4'))
E           argv.py "${array[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::space before ( in array initialization[L36]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c39b0>
test_file = 'array.test.sh'
test_case = TestCase(name='space before ( in array initialization', script="# NOTE: mksh accepts this, but bash doesn't\na= (1 '2 ...'mksh'], variant='OK'), Assertion(type='stdout', value='1', shells=['mksh'], variant='OK')], line_number=36, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: space before ( in array initialization (line 36)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: None
E           Actual stdout:   '1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: mksh accepts this, but bash doesn't
E           a= (1 '2 3')
E           echo $a
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::array with invalid token[L53]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c3b30>
test_file = 'array.test.sh'
test_case = TestCase(name='array with invalid token', script='a=(\n1\n&\n\'2 3\'\n)\nargv.py "${a[@]}"', assertions=[Assertion(typ...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=53, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array with invalid token (line 53)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: None
E           Actual stdout:   "['1', '2 3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(
E           1
E           &
E           '2 3'
E           )
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Negative index and sparse array[L84]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c3ef0>
test_file = 'array.test.sh'
test_case = TestCase(name='Negative index and sparse array', script='a=(0 1 2 3 4)\nunset a[1]\nunset a[4]\necho "${a[@]}"\necho -...ut', value='0 2 3\n-1\n-2\n-3\n-4\n-5\n0 2 3 0\n0 2 3 42', shells=['mksh'], variant='BUG')], line_number=84, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Negative index and sparse array (line 84)
E           
E           stdout mismatch:
E             expected: '0 2 3\n-1 3\n-2 2\n-3\n-4 0\n-5\n0 2 30\n0 2 72'
E             actual:   '0 2 3\n-1 3\n-2 2\n-3\n-4 0\n-5\n0 0 2 3\n42 0 2 3'
E           
E           Expected stdout: '0 2 3\n-1 3\n-2 2\n-3\n-4 0\n-5\n0 2 30\n0 2 72'
E           Actual stdout:   '0 2 3\n-1 3\n-2 2\n-3\n-4 0\n-5\n0 0 2 3\n42 0 2 3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(0 1 2 3 4)
E           unset a[1]
E           unset a[4]
E           echo "${a[@]}"
E           echo -1 ${a[-1]}
E           echo -2 ${a[-2]}
E           echo -3 ${a[-3]}
E           echo -4 ${a[-4]}
E           echo -5 ${a[-5]}
E           
E           a[-1]+=0  # append 0 on the end
E           echo ${a[@]}
E           (( a[-1] += 42 ))
E           echo ${a[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Negative index and sparse array[L121]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090c3fb0>
test_file = 'array.test.sh'
test_case = TestCase(name='Negative index and sparse array', script="a=(0 1)\nunset 'a[-1]'  # remove last element\na+=(2 3)\necho...ne), Assertion(type='stdout', value='0 0\n1 1\n2 2\n3 3', shells=['mksh'], variant='BUG')], line_number=121, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Negative index and sparse array (line 121)
E           
E           stdout mismatch:
E             expected: '0 0\n2 2\n3 3\n0'
E             actual:   '0 0\n1 1\n2 2\n3 3'
E           
E           Expected stdout: '0 0\n2 2\n3 3\n0'
E           Actual stdout:   '0 0\n1 1\n2 2\n3 3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(0 1)
E           unset 'a[-1]'  # remove last element
E           a+=(2 3)
E           echo ${a[0]} $((a[0]))
E           echo ${a[1]} $((a[1]))
E           echo ${a[2]} $((a[2]))
E           echo ${a[3]} $((a[3]))
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[array.test.sh::Length after unset[L142]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e40b0>
test_file = 'array.test.sh'
test_case = TestCase(name='Length after unset', script='a=(0 1 2 3)\nunset a[-1]\necho len=${#a[@]}\nunset a[-1]\necho len=${#a[@]...ant=None), Assertion(type='stdout', value='len=4\nlen=4', shells=['mksh'], variant='BUG')], line_number=142, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Length after unset (line 142)
E           
E           stdout mismatch:
E             expected: 'len=3\nlen=2'
E             actual:   'len=4\nlen=4'
E           
E           Expected stdout: 'len=3\nlen=2'
E           Actual stdout:   'len=4\nlen=4\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(0 1 2 3)
E           unset a[-1]
E           echo len=${#a[@]}
E           unset a[-1]
E           echo len=${#a[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Retrieve index that is a command sub[L169]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e42f0>
test_file = 'array.test.sh'
test_case = TestCase(name='Retrieve index that is a command sub', script='a=(1 \'2 3\')\nargv.py "${a[$(echo 1)]}"', assertions=[Assertion(type='stdout', value="['2 3']", shells=None, variant=None)], line_number=169, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Retrieve index that is a command sub (line 169)
E           
E           stdout mismatch:
E             expected: "['2 3']"
E             actual:   "['1']"
E           
E           Expected stdout: "['2 3']"
E           Actual stdout:   "['1']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 '2 3')
E           argv.py "${a[$(echo 1)]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Interpolate array into array[L229]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e49b0>
test_file = 'array.test.sh'
test_case = TestCase(name='Interpolate array into array', script='a=(1 \'2 3\')\na=(0 "${a[@]}" \'4 5\')\nargv.py "${a[@]}"', asse...ns=[Assertion(type='stdout', value="['0', '1', '2 3', '4 5']", shells=None, variant=None)], line_number=229, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Interpolate array into array (line 229)
E           
E           stdout mismatch:
E             expected: "['0', '1', '2 3', '4 5']"
E             actual:   "['0', '0', '4 5']"
E           
E           Expected stdout: "['0', '1', '2 3', '4 5']"
E           Actual stdout:   "['0', '0', '4 5']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 '2 3')
E           a=(0 "${a[@]}" '4 5')
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Exporting array doesn't do anything, not even first element[L235]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e4a70>
test_file = 'array.test.sh'
test_case = TestCase(name="Exporting array doesn't do anything, not even first element", script="# bash parses, but doesn't execut...ant=None), Assertion(type='stdout', value='mystr\nNone\nNone', shells=None, variant=None)], line_number=235, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Exporting array doesn't do anything, not even first element (line 235)
E           
E           stdout mismatch:
E             expected: 'mystr\nNone\nNone'
E             actual:   'mystr\nmystr\nmystr'
E           
E           Expected stdout: 'mystr\nNone\nNone'
E           Actual stdout:   'mystr\nmystr\nmystr\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash parses, but doesn't execute.
E           # mksh gives syntax error -- parses differently with 'export'
E           # osh no longer parses this statically.
E           
E           export PYTHONPATH
E           
E           PYTHONPATH=mystr  # NOTE: in bash, this doesn't work afterward!
E           printenv.py PYTHONPATH
E           
E           PYTHONPATH=(myarray)
E           printenv.py PYTHONPATH
E           
E           PYTHONPATH=(a b c)
E           printenv.py PYTHONPATH
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Arrays can't be used as env bindings[L275]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e4bf0>
test_file = 'array.test.sh'
test_case = TestCase(name="Arrays can't be used as env bindings", script='# Hm bash it treats it as a string!\nA=a B=(b b) printen...['bash'], variant='OK'), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=275, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Arrays can't be used as env bindings (line 275)
E           
E           stdout mismatch:
E             expected: 'a\n(b b)'
E             actual:   'a\nNone'
E           
E           Expected stdout: 'a\n(b b)'
E           Actual stdout:   'a\nNone\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           # Hm bash it treats it as a string!
E           A=a B=(b b) printenv.py A B
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Associative arrays can't be used as env bindings either[L287]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e4cb0>
test_file = 'array.test.sh'
test_case = TestCase(name="Associative arrays can't be used as env bindings either", script='A=a B=([k]=v) printenv.py A B', asser...['bash'], variant='OK'), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=287, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Associative arrays can't be used as env bindings either (line 287)
E           
E           stdout mismatch:
E             expected: 'a\n([k]=v)'
E             actual:   'a\nNone'
E           
E           Expected stdout: 'a\n([k]=v)'
E           Actual stdout:   'a\nNone\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           A=a B=([k]=v) printenv.py A B
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Set element with var ref[L303]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e4e30>
test_file = 'array.test.sh'
test_case = TestCase(name='Set element with var ref', script='a=(1 \'2 3\')\ni=0\na[$i]=9\nargv.py "${a[@]}"', assertions=[Assertion(type='stdout', value="['9', '2 3']", shells=None, variant=None)], line_number=303, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Set element with var ref (line 303)
E           
E           stdout mismatch:
E             expected: "['9', '2 3']"
E             actual:   "['1', '2 3']"
E           
E           Expected stdout: "['9', '2 3']"
E           Actual stdout:   "['1', '2 3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 '2 3')
E           i=0
E           a[$i]=9
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Set element with array ref[L310]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e4ef0>
test_file = 'array.test.sh'
test_case = TestCase(name='Set element with array ref', script='# This makes parsing a little more complex.  Anything can be insid...]}"', assertions=[Assertion(type='stdout', value="['1', '9']", shells=None, variant=None)], line_number=310, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Set element with array ref (line 310)
E           
E           stdout mismatch:
E             expected: "['1', '9']"
E             actual:   "['1', '2 3']"
E           
E           Expected stdout: "['1', '9']"
E           Actual stdout:   "['1', '2 3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This makes parsing a little more complex.  Anything can be inside [],
E           # including other [].
E           a=(1 '2 3')
E           i=(0 1)
E           a[${i[1]}]=9
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Set array item to array[L319]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e4fb0>
test_file = 'array.test.sh'
test_case = TestCase(name='Set array item to array', script='a=(1 2)\na[0]=(3 4)\necho "status=$?"', assertions=[Assertion(type='s...bash'], variant='BUG'), Assertion(type='status', value=0, shells=['bash'], variant='BUG')], line_number=319, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Set array item to array (line 319)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2)
E           a[0]=(3 4)
E           echo "status=$?"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Slice of array with [@][L329]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5070>
test_file = 'array.test.sh'
test_case = TestCase(name='Slice of array with [@]', script='# mksh doesn\'t support this syntax!  It\'s a bash extension.\na=(1 2..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=329, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Slice of array with [@] (line 329)
E           
E           stdout mismatch:
E             expected: "['2', '3']"
E             actual:   "['2 3']"
E           
E           Expected stdout: "['2', '3']"
E           Actual stdout:   "['2 3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh doesn't support this syntax!  It's a bash extension.
E           a=(1 2 3)
E           argv.py "${a[@]:1:2}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Negative slice begin[L337]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5130>
test_file = 'array.test.sh'
test_case = TestCase(name='Negative slice begin', script='# mksh doesn\'t support this syntax!  It\'s a bash extension.\n# NOTE: f..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=337, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Negative slice begin (line 337)
E           
E           stdout mismatch:
E             expected: "['2', '3', '4', '5']"
E             actual:   "['']"
E           
E           Expected stdout: "['2', '3', '4', '5']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh doesn't support this syntax!  It's a bash extension.
E           # NOTE: for some reason -2) has to be in parens?  Ah that's because it
E           # conflicts with :-!  That's silly.  You can also add a space.
E           a=(1 2 3 4 5)
E           argv.py "${a[@]:(-4)}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Negative slice length[L347]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e51f0>
test_file = 'array.test.sh'
test_case = TestCase(name='Negative slice length', script='a=(1 2 3 4 5)\nargv.py "${a[@]: 1: -3}"', assertions=[Assertion(type='s...s=None, variant=None), Assertion(type='stdout-json', value='', shells=None, variant=None)], line_number=347, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Negative slice length (line 347)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   "['2']"
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   "['2']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3 4 5)
E           argv.py "${a[@]: 1: -3}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Slice with arithmetic[L353]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e52b0>
test_file = 'array.test.sh'
test_case = TestCase(name='Slice with arithmetic', script='a=(1 2 3)\ni=5\nargv.py "${a[@]:i-4:2}"', assertions=[Assertion(type='s..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=353, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Slice with arithmetic (line 353)
E           
E           stdout mismatch:
E             expected: "['2', '3']"
E             actual:   "['']"
E           
E           Expected stdout: "['2', '3']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           i=5
E           argv.py "${a[@]:i-4:2}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::glob within array yields separate elements[L381]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e55b0>
test_file = 'array.test.sh'
test_case = TestCase(name='glob within array yields separate elements', script='touch y.Y yy.Y\na=(*.Y)\nargv.py "${a[@]}"', assertions=[Assertion(type='stdout', value="['y.Y', 'yy.Y']", shells=None, variant=None)], line_number=381, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: glob within array yields separate elements (line 381)
E           
E           stdout mismatch:
E             expected: "['y.Y', 'yy.Y']"
E             actual:   "['*.Y']"
E           
E           Expected stdout: "['y.Y', 'yy.Y']"
E           Actual stdout:   "['*.Y']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch y.Y yy.Y
E           a=(*.Y)
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Stripping a whole array quoted[L419]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5970>
test_file = 'array.test.sh'
test_case = TestCase(name='Stripping a whole array quoted', script='files=(\'foo.c\' \'sp ace.h\' \'bar.c\')\nargv.py "${files[@]%..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=419, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Stripping a whole array quoted (line 419)
E           
E           stdout mismatch:
E             expected: "['foo', 'sp ace.h', 'bar']"
E             actual:   "['foo sp ace.h bar']"
E           
E           Expected stdout: "['foo', 'sp ace.h', 'bar']"
E           Actual stdout:   "['foo sp ace.h bar']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           files=('foo.c' 'sp ace.h' 'bar.c')
E           argv.py "${files[@]%.c}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Multiple subscripts not allowed[L427]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5a30>
test_file = 'array.test.sh'
test_case = TestCase(name='Multiple subscripts not allowed', script='# NOTE: bash 4.3 had a bug where it ignored the bad subscript..., variant=None), Assertion(type='status', value=1, shells=['bash', 'mksh'], variant='OK')], line_number=427, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple subscripts not allowed (line 427)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   "['123', '123']"
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: ''
E           Actual stdout:   "['123', '123']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: bash 4.3 had a bug where it ignored the bad subscript, but now it is
E           # fixed.
E           a=('123' '456')
E           argv.py "${a[0]}" "${a[0][0]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Length op, index op, then transform op is not allowed[L436]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5af0>
test_file = 'array.test.sh'
test_case = TestCase(name='Length op, index op, then transform op is not allowed', script='a=(\'123\' \'456\')\necho "${#a[0]}" "$..., variant=None), Assertion(type='status', value=1, shells=['bash', 'mksh'], variant='OK')], line_number=436, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Length op, index op, then transform op is not allowed (line 436)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '3 0'
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '3 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           a=('123' '456')
E           echo "${#a[0]}" "${#a[0]/1/xxx}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::${mystr[@]} and ${mystr[*]} are no-ops[L443]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5bb0>
test_file = 'array.test.sh'
test_case = TestCase(name='${mystr[@]} and ${mystr[*]} are no-ops', script="s='abc'\necho ${s[@]}\necho ${s[*]}", assertions=[Assertion(type='stdout', value='abc\nabc', shells=None, variant=None)], line_number=443, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${mystr[@]} and ${mystr[*]} are no-ops (line 443)
E           
E           stdout mismatch:
E             expected: 'abc\nabc'
E             actual:   ''
E           
E           Expected stdout: 'abc\nabc'
E           Actual stdout:   '\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s='abc'
E           echo ${s[@]}
E           echo ${s[*]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Create a "user" array out of the argv array[L473]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5d30>
test_file = 'array.test.sh'
test_case = TestCase(name='Create a "user" array out of the argv array', script='set -- \'a b\' \'c\'\narray1=(\'x y\' \'z\')\narr...ns=[Assertion(type='stdout', value="['x y', 'z', 'a b', 'c']", shells=None, variant=None)], line_number=473, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Create a "user" array out of the argv array (line 473)
E           
E           stdout mismatch:
E             expected: "['x y', 'z', 'a b', 'c']"
E             actual:   "['x y', 'z', 'a b c']"
E           
E           Expected stdout: "['x y', 'z', 'a b', 'c']"
E           Actual stdout:   "['x y', 'z', 'a b c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- 'a b' 'c'
E           array1=('x y' 'z')
E           array2=("$@")
E           argv.py "${array1[@]}" "${array2[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Brace Expansion within Array[L486]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5eb0>
test_file = 'array.test.sh'
test_case = TestCase(name='Brace Expansion within Array', script='a=(-{a,b} {c,d}-)\necho "${a[@]}"', assertions=[Assertion(type='stdout', value='-a -b c- d-', shells=None, variant=None)], line_number=486, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Brace Expansion within Array (line 486)
E           
E           stdout mismatch:
E             expected: '-a -b c- d-'
E             actual:   '-{a,b} {c,d}-'
E           
E           Expected stdout: '-a -b c- d-'
E           Actual stdout:   '-{a,b} {c,d}-\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(-{a,b} {c,d}-)
E           echo "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[array.test.sh::array default[L491]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e5f70>
test_file = 'array.test.sh'
test_case = TestCase(name='array default', script='default=(\'1 2\' \'3\')\nargv.py "${undef[@]:-${default[@]}}"', assertions=[Assertion(type='stdout', value="['1 2', '3']", shells=None, variant=None)], line_number=491, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array default (line 491)
E           
E           stdout mismatch:
E             expected: "['1 2', '3']"
E             actual:   "['1 2 3']"
E           
E           Expected stdout: "['1 2', '3']"
E           Actual stdout:   "['1 2 3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           default=('1 2' '3')
E           argv.py "${undef[@]:-${default[@]}}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Singleton Array Copy and Assign.  OSH can't index strings with ints[L496]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6030>
test_file = 'array.test.sh'
test_case = TestCase(name="Singleton Array Copy and Assign.  OSH can't index strings with ints", script='a=( \'12 3\' )\nb=( "${a[...sertion(type='stdout', value='4 4\n1 1\n4 4\n1 1', shells=['bash', 'mksh'], variant='OK')], line_number=496, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Singleton Array Copy and Assign.  OSH can't index strings with ints (line 496)
E           
E           stdout mismatch:
E             expected: '4 4\n1 1\n4 4\n1 1'
E             actual:   '4 4\n1 1\n0 0\n0 0'
E           
E           Expected stdout: '4 4\n1 1\n4 4\n1 1'
E           Actual stdout:   '4 4\n1 1\n0 0\n0 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           a=( '12 3' )
E           b=( "${a[@]}" )
E           c="${a[@]}"  # This decays it to a string
E           d=${a[*]}  # This decays it to a string
E           echo ${#a[0]} ${#b[0]}
E           echo ${#a[@]} ${#b[@]}
E           
E           # osh is intentionally stricter, and these fail.
E           echo ${#c[0]} ${#d[0]}
E           echo ${#c[@]} ${#d[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::declare -a / local -a is empty array[L521]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e60f0>
test_file = 'array.test.sh'
test_case = TestCase(name='declare -a / local -a is empty array', script='declare -a myarray\nargv.py "${myarray[@]}"\nmyarray+=(\...rtions=[Assertion(type='stdout', value="[]\n['x']\n[]\n['x']", shells=None, variant=None)], line_number=521, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -a / local -a is empty array (line 521)
E           
E           stdout mismatch:
E             expected: "[]\n['x']\n[]\n['x']"
E             actual:   "[]\n['x']\n['x']\n['x', 'x']"
E           
E           Expected stdout: "[]\n['x']\n[]\n['x']"
E           Actual stdout:   "[]\n['x']\n['x']\n['x', 'x']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a myarray
E           argv.py "${myarray[@]}"
E           myarray+=('x')
E           argv.py "${myarray[@]}"
E           
E           f() {
E             local -a myarray
E             argv.py "${myarray[@]}"
E             myarray+=('x')
E             argv.py "${myarray[@]}"
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Append sparse arrays[L568]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6330>
test_file = 'array.test.sh'
test_case = TestCase(name='Append sparse arrays', script='a=()\n(( a[99]=1 ))\nb=()\n(( b[33]=2 ))\n(( b[66]=3 ))\na+=( "${b[@]}" ...rtion(type='stdout', value="['1', '2', '3']\n['1', '2', '3']", shells=None, variant=None)], line_number=568, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Append sparse arrays (line 568)
E           
E           stdout mismatch:
E             expected: "['1', '2', '3']\n['1', '2', '3']"
E             actual:   "['1', '2 3']\n['1', '2 3', '']"
E           
E           Expected stdout: "['1', '2', '3']\n['1', '2', '3']"
E           Actual stdout:   "['1', '2 3']\n['1', '2 3', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=()
E           (( a[99]=1 ))
E           b=()
E           (( b[33]=2 ))
E           (( b[66]=3 ))
E           a+=( "${b[@]}" )
E           argv.py "${a[@]}"
E           argv.py "${a[99]}" "${a[100]}" "${a[101]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Slice of sparse array with [@][L582]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e63f0>
test_file = 'array.test.sh'
test_case = TestCase(name='Slice of sparse array with [@]', script='# mksh doesn\'t support this syntax!  It\'s a bash extension.\..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=582, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Slice of sparse array with [@] (line 582)
E           
E           stdout mismatch:
E             expected: "['1', '2']"
E             actual:   "['']"
E           
E           Expected stdout: "['1', '2']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh doesn't support this syntax!  It's a bash extension.
E           (( a[33]=1 ))
E           (( a[66]=2 ))
E           (( a[99]=2 ))
E           argv.py "${a[@]:15:2}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Using an array itself as the index on LHS[L592]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e64b0>
test_file = 'array.test.sh'
test_case = TestCase(name='Using an array itself as the index on LHS', script='shopt -u strict_arith\na[a]=42\na[a]=99\nargv.py "$...Assertion(type='stdout', value="['42', '99', '42', '99', '']", shells=None, variant=None)], line_number=592, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Using an array itself as the index on LHS (line 592)
E           
E           stdout mismatch:
E             expected: "['42', '99', '42', '99', '']"
E             actual:   "['', '', '']"
E           
E           Expected stdout: "['42', '99', '42', '99', '']"
E           Actual stdout:   "['', '', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -u strict_arith
E           a[a]=42
E           a[a]=99
E           argv.py "${a[@]}" "${a[0]}" "${a[42]}" "${a[99]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Using an array itself as the index on RHS[L603]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6570>
test_file = 'array.test.sh'
test_case = TestCase(name='Using an array itself as the index on RHS', script='shopt -u strict_arith\na=(1 2 3)\n(( x = a[a] ))\ne...hells=None, variant=None), Assertion(type='stdout', value='2', shells=None, variant=None)], line_number=603, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Using an array itself as the index on RHS (line 603)
E           
E           stdout mismatch:
E             expected: '2'
E             actual:   '1'
E           
E           Expected stdout: '2'
E           Actual stdout:   '1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -u strict_arith
E           a=(1 2 3)
E           (( x = a[a] ))
E           echo $x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::a[$x$y] on LHS and RHS[L613]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6630>
test_file = 'array.test.sh'
test_case = TestCase(name='a[$x$y] on LHS and RHS', script='x=1\ny=2\na[$x$y]=foo\n\n# not allowed by OSH parsing\n#echo ${a[$x$y]...{#a[@]}', assertions=[Assertion(type='stdout', value='foo\n1', shells=None, variant=None)], line_number=613, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: a[$x$y] on LHS and RHS (line 613)
E           
E           stdout mismatch:
E             expected: 'foo\n1'
E             actual:   '\n0'
E           
E           Expected stdout: 'foo\n1'
E           Actual stdout:   '\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=1
E           y=2
E           a[$x$y]=foo
E           
E           # not allowed by OSH parsing
E           #echo ${a[$x$y]}
E           
E           echo ${a[12]}
E           echo ${#a[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Dynamic parsing of LHS a[$code]=value[L630]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e66f0>
test_file = 'array.test.sh'
test_case = TestCase(name='Dynamic parsing of LHS a[$code]=value', script='declare -a array\narray[x=1]=\'one\'\n\ncode=\'y=2\'\n#...dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=630, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Dynamic parsing of LHS a[$code]=value (line 630)
E           
E           stdout mismatch:
E             expected: "['one', 'two']\nx=1\ny=2"
E             actual:   '[]\nx=\ny='
E           
E           Expected stdout: "['one', 'two']\nx=1\ny=2"
E           Actual stdout:   '[]\nx=\ny=\n'
E           Expected stderr: None
E           Actual stderr:   'bash: array[x=1]=one: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a array
E           array[x=1]='one'
E           
E           code='y=2'
E           #code='1+2'  # doesn't work either
E           array[$code]='two'
E           
E           argv.py "${array[@]}"
E           echo x=$x
E           echo y=$y
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::test -v a[i] with arith expressions[L722]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e69f0>
test_file = 'array.test.sh'
test_case = TestCase(name='test -v a[i] with arith expressions', script="array=(1 2 3 '')\n\ntest -v 'array[1+1]'\necho status=$?\...on(type='stdout', value='status=2\nstatus=2\n\ndbracket', shells=['mksh'], variant='N-I')], line_number=722, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test -v a[i] with arith expressions (line 722)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=1\n\ndbracket\nstatus=0\nstatus=1'
E             actual:   'status=1\nstatus=1\n\ndbracket\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=1\n\ndbracket\nstatus=0\nstatus=1'
E           Actual stdout:   'status=1\nstatus=1\n\ndbracket\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           array=(1 2 3 '')
E           
E           test -v 'array[1+1]'
E           echo status=$?
E           
E           test -v 'array[4+1]'
E           echo status=$?
E           
E           echo
E           echo dbracket
E           
E           [[ -v array[1+1] ]]
E           echo status=$?
E           
E           [[ -v array[4+1] ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::More arith expressions in [[ -v array[expr]] ]][L759]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6ab0>
test_file = 'array.test.sh'
test_case = TestCase(name='More arith expressions in [[ -v array[expr]] ]]', script="typeset -a array\narray=('' nonempty)\n\n# Th...ksh'], variant='N-I'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=759, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: More arith expressions in [[ -v array[expr]] ]] (line 759)
E           
E           stdout mismatch:
E             expected: 'zero=0\none=0\ntwo=1\n---\nzero=0\none=0\ntwo=1\n---\nzero=0\none=0\ntwo=1'
E             actual:   'zero=1\none=1\ntwo=1\n---\nzero=1\none=1\ntwo=1\n---\nzero=1\none=1\ntwo=1'
E           
E           Expected stdout: 'zero=0\none=0\ntwo=1\n---\nzero=0\none=0\ntwo=1\n---\nzero=0\none=0\ntwo=1'
E           Actual stdout:   'zero=1\none=1\ntwo=1\n---\nzero=1\none=1\ntwo=1\n---\nzero=1\none=1\ntwo=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -a array
E           array=('' nonempty)
E           
E           # This feels inconsistent with the rest of bash?
E           zero=0
E           
E           [[ -v array[zero+0] ]]
E           echo zero=$?
E           
E           [[ -v array[zero+1] ]]
E           echo one=$?
E           
E           [[ -v array[zero+2] ]]
E           echo two=$?
E           
E           echo ---
E           
E           i='0+0'
E           [[ -v array[i] ]]
E           echo zero=$?
E           
E           i='0+1'
E           [[ -v array[i] ]]
E           echo one=$?
E           
E           i='0+2'
E           [[ -v array[i] ]]
E           echo two=$?
E           
E           echo ---
E           
E           i='0+0'
E           [[ -v array[$i] ]]
E           echo zero=$?
E           
E           i='0+1'
E           [[ -v array[$i] ]]
E           echo one=$?
E           
E           i='0+2'
E           [[ -v array[$i] ]]
E           echo two=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Regression: Assigning with out-of-range negative index[L824]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6b70>
test_file = 'array.test.sh'
test_case = TestCase(name='Regression: Assigning with out-of-range negative index', script='a=()\na[-1]=1\n\n\n\n# Note: mksh inte...ksh'], variant='N-I'), Assertion(type='stderr', value='', shells=['mksh'], variant='N-I')], line_number=824, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: Assigning with out-of-range negative index (line 824)
E           
E           stderr mismatch:
E             expected: 'bash: line 2: a[-1]: bad array subscript'
E             actual:   ''
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: 'bash: line 2: a[-1]: bad array subscript'
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           a=()
E           a[-1]=1
E           
E           
E           
E           # Note: mksh interprets -1 as 0xFFFFFFFF
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Regression: Negative index in [[ -v a[index] ]][L847]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6c30>
test_file = 'array.test.sh'
test_case = TestCase(name='Regression: Negative index in [[ -v a[index] ]]', script="a[0]=x\na[5]=y\na[10]=z\n[[ -v a[-1] ]] && ec...ksh'], variant='N-I'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=847, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: Negative index in [[ -v a[index] ]] (line 847)
E           
E           stdout mismatch:
E             expected: 'a has -1\na has -6\na has -11'
E             actual:   ''
E           
E           Expected stdout: 'a has -1\na has -6\na has -11'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           a[0]=x
E           a[5]=y
E           a[10]=z
E           [[ -v a[-1] ]] && echo 'a has -1'
E           [[ -v a[-2] ]] && echo 'a has -2'
E           [[ -v a[-5] ]] && echo 'a has -5'
E           [[ -v a[-6] ]] && echo 'a has -6'
E           [[ -v a[-10] ]] && echo 'a has -10'
E           [[ -v a[-11] ]] && echo 'a has -11'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Regression: Negative out-of-range index in [[ -v a[index] ]][L869]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6cf0>
test_file = 'array.test.sh'
test_case = TestCase(name='Regression: Negative out-of-range index in [[ -v a[index] ]]', script='e=()\n[[ -v e[-1] ]] && echo \'e...>[2]: syntax error: 'e[-1]' unexpected operator/operand", shells=['mksh'], variant='N-I')], line_number=869, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: Negative out-of-range index in [[ -v a[index] ]] (line 869)
E           
E           stderr mismatch:
E             expected: 'bash: line 2: e: bad array subscript'
E             actual:   ''
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: 'bash: line 2: e: bad array subscript'
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           e=()
E           [[ -v e[-1] ]] && echo 'e has -1'
E           
E           
E           
E           
E           
E           # #### a+=() modifies existing instance of BashArray
E           # case $SH in mksh|bash) exit ;; esac
E           #
E           # a=(1 2 3)
E           # var b = a
E           # a+=(4 5)
E           # echo "a=(${a[*]})"
E           # echo "b=(${b[*]})"
E           #
E           # ## STDOUT:
E           # a=(1 2 3 4 5)
E           # b=(1 2 3 4 5)
E           # ## END
E           #
E           # ## N-I mksh/bash STDOUT:
E           # ## END
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Regression: unset a[-2]: out-of-bound negative index should cause error[L908]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6db0>
test_file = 'array.test.sh'
test_case = TestCase(name='Regression: unset a[-2]: out-of-bound negative index should cause error', script="case $SH in mksh) exi...ksh'], variant='N-I'), Assertion(type='stderr', value='', shells=['mksh'], variant='N-I')], line_number=908, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: unset a[-2]: out-of-bound negative index should cause error (line 908)
E           
E           stderr mismatch:
E             expected: 'bash: line 4: unset: [-2]: bad array subscript'
E             actual:   ''
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: 'bash: line 4: unset: [-2]: bad array subscript'
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(1)
E           unset -v 'a[-2]'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Regression: Out-of-bound negative offset for ${a[@]:offset}[L932]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6e70>
test_file = 'array.test.sh'
test_case = TestCase(name='Regression: Out-of-bound negative offset for ${a[@]:offset}', script='case $SH in mksh) exit ;; esac\n\...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=932, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: Out-of-bound negative offset for ${a[@]:offset} (line 932)
E           
E           stdout mismatch:
E             expected: 'a=(1 2 3 4)\nbegin=-1 -> (4)\nbegin=-2 -> (3 4)\nbegin=-3 -> (2 3 4)\nbegin=-4 -> (1 2 3 4)\nbegin=-5 -> ()'
E             actual:   'a=(1 2 3 4)\nbegin=-1 -> (4)\nbegin=-2 -> (3 4)\nbegin=-3 -> (2 3 4)\nbegin=-4 -> (1 2 3 4)\nbegin=-5 -> (1 2 3 4)'
E           
E           Expected stdout: 'a=(1 2 3 4)\nbegin=-1 -> (4)\nbegin=-2 -> (3 4)\nbegin=-3 -> (2 3 4)\nbegin=-4 -> (1 2 3 4)\nbegin=-5 -> ()'
E           Actual stdout:   'a=(1 2 3 4)\nbegin=-1 -> (4)\nbegin=-2 -> (3 4)\nbegin=-3 -> (2 3 4)\nbegin=-4 -> (1 2 3 4)\nbegin=-5 -> (1 2 3 4)\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(1 2 3 4)
E           echo "a=(${a[*]})"
E           echo "begin=-1 -> (${a[*]: -1})"
E           echo "begin=-2 -> (${a[*]: -2})"
E           echo "begin=-3 -> (${a[*]: -3})"
E           echo "begin=-4 -> (${a[*]: -4})"
E           echo "begin=-5 -> (${a[*]: -5})"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Regression: Array length after unset[L956]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6f30>
test_file = 'array.test.sh'
test_case = TestCase(name='Regression: Array length after unset', script='case $SH in mksh) exit ;; esac\n\na=(x)\na[9]=y\necho "l...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=956, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: Array length after unset (line 956)
E           
E           stdout mismatch:
E             expected: 'len 2;\nlen 1;\nlast x;'
E             actual:   'len 2;\nlen 2;\nlast y;'
E           
E           Expected stdout: 'len 2;\nlen 1;\nlast x;'
E           Actual stdout:   'len 2;\nlen 2;\nlast y;\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(x)
E           a[9]=y
E           echo "len ${#a[@]};"
E           
E           unset -v 'a[-1]'
E           echo "len ${#a[@]};"
E           echo "last ${a[@]: -1};"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Regression: ${a[@]@Q} crash with `a[0]=x a[2]=y`[L977]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e6ff0>
test_file = 'array.test.sh'
test_case = TestCase(name='Regression: ${a[@]@Q} crash with `a[0]=x a[2]=y`', script='case $SH in mksh) exit ;; esac\n\na[0]=x\na[...bash'], variant='OK'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=977, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: ${a[@]@Q} crash with `a[0]=x a[2]=y` (line 977)
E           
E           stdout mismatch:
E             expected: "quoted = ('x' 'y')"
E             actual:   "quoted = ('x y')"
E           
E           Expected stdout: "quoted = ('x' 'y')"
E           Actual stdout:   "quoted = ('x y')\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a[0]=x
E           a[2]=y
E           echo "quoted = (${a[@]@Q})"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[array.test.sh::Regression: silent out-of-bound negative index in ${a[-2]} and $((a[-2]))[L996]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e70b0>
test_file = 'array.test.sh'
test_case = TestCase(name='Regression: silent out-of-bound negative index in ${a[-2]} and $((a[-2]))', script='case $SH in mksh) e...ksh'], variant='N-I'), Assertion(type='stderr', value='', shells=['mksh'], variant='N-I')], line_number=996, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: silent out-of-bound negative index in ${a[-2]} and $((a[-2])) (line 996)
E           
E           stderr mismatch:
E             expected: 'bash: line 4: a: bad array subscript\nbash: line 6: a: bad array subscript'
E             actual:   ''
E           
E           Expected stdout: '[]\n0\n[0]\n0'
E           Actual stdout:   '[]\n0\n[0]\n0\n'
E           Expected stderr: 'bash: line 4: a: bad array subscript\nbash: line 6: a: bad array subscript'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           a=(x)
E           echo "[${a[-2]}]"
E           echo $?
E           echo "[$((a[-2]))]"
E           echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-deferred.test.sh::typeset -a a[1]=a a[3]=c[L16]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e7230>
test_file = 'assign-deferred.test.sh'
test_case = TestCase(name='typeset -a a[1]=a a[3]=c', script='# declare works the same way in bash, but not mksh.\n# spaces are NO...@]}"', assertions=[Assertion(type='stdout', value="['x', 'z']", shells=None, variant=None)], line_number=16, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: typeset -a a[1]=a a[3]=c (line 16)
E           
E           stdout mismatch:
E             expected: "['x', 'z']"
E             actual:   '[]'
E           
E           Expected stdout: "['x', 'z']"
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # declare works the same way in bash, but not mksh.
E           # spaces are NOT allowed here.
E           typeset -a a[1*1]=x a[1+2]=z
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-deferred.test.sh::local a[3]=4[L23]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e72f0>
test_file = 'assign-deferred.test.sh'
test_case = TestCase(name='local a[3]=4', script='f() {\n  local a[3]=4 a[5]=6\n  echo status=$?\n  argv.py "${!a[@]}" "${a[@]}"\n...ssertion(type='stdout', value="status=0\n['3', '5', '4', '6']", shells=None, variant=None)], line_number=23, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: local a[3]=4 (line 23)
E           
E           stdout mismatch:
E             expected: "status=0\n['3', '5', '4', '6']"
E             actual:   'status=1\n[]'
E           
E           Expected stdout: "status=0\n['3', '5', '4', '6']"
E           Actual stdout:   'status=1\n[]\n'
E           Expected stderr: None
E           Actual stderr:   "bash: local: 'a[3]': not a valid identifier\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             local a[3]=4 a[5]=6
E             echo status=$?
E             argv.py "${!a[@]}" "${a[@]}"
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-deferred.test.sh::is 'builtin' prefix and array allowed?  OSH is smarter[L95]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e76b0>
test_file = 'assign-deferred.test.sh'
test_case = TestCase(name="is 'builtin' prefix and array allowed?  OSH is smarter", script='builtin typeset a=(1 2 3)\necho len=${...h'], variant='OK'), Assertion(type='stdout-json', value='', shells=['bash'], variant='OK')], line_number=95, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: is 'builtin' prefix and array allowed?  OSH is smarter (line 95)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'len=0'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'len=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           builtin typeset a=(1 2 3)
E           echo len=${#a[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-deferred.test.sh::is 'command' prefix and array allowed?  OSH is smarter[L106]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e7770>
test_file = 'assign-deferred.test.sh'
test_case = TestCase(name="is 'command' prefix and array allowed?  OSH is smarter", script='command typeset a=(1 2 3)\necho len=${...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['bash'], variant='OK')], line_number=106, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: is 'command' prefix and array allowed?  OSH is smarter (line 106)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'len=0'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'len=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           command typeset a=(1 2 3)
E           echo len=${#a[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-dialects.test.sh::test -v with arrays[L39]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e79b0>
test_file = 'assign-dialects.test.sh'
test_case = TestCase(name='test -v with arrays', script='typeset -a a\n\ntest -v a\necho a=$?\ntest -v \'a[0]\'\necho "a[0]=$?"\ne...', value='a=2\na[0]=2\n\na=2\na[0]=2\n\na[1]=2\na[x]=2\n', shells=['mksh'], variant='BUG')], line_number=39, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test -v with arrays (line 39)
E           
E           stdout mismatch:
E             expected: 'a=1\na[0]=1\n\na=0\na[0]=0\n\na[1]=1\na[x]=0'
E             actual:   'a=1\na[0]=1\n\na=1\na[0]=0\n\na[1]=1\na[x]=1'
E           
E           Expected stdout: 'a=1\na[0]=1\n\na=0\na[0]=0\n\na[1]=1\na[x]=0\n'
E           Actual stdout:   'a=1\na[0]=1\n\na=1\na[0]=0\n\na[1]=1\na[x]=1\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -a a
E           
E           test -v a
E           echo a=$?
E           test -v 'a[0]'
E           echo "a[0]=$?"
E           echo
E           
E           a[0]=1
E           
E           test -v a
E           echo a=$?
E           test -v 'a[0]'
E           echo "a[0]=$?"
E           echo
E           
E           test -v 'a[1]'
E           echo "a[1]=$?"
E           
E           # stupid rule about undefined 'x'
E           test -v 'a[x]'
E           echo "a[x]=$?"
E           echo
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-dialects.test.sh::test -v with assoc arrays[L89]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e7a70>
test_file = 'assign-dialects.test.sh'
test_case = TestCase(name='test -v with assoc arrays', script='typeset -A A\n\ntest -v A\necho A=$?\ntest -v \'A[0]\'\necho "A[0]=...', value='A=2\nA[0]=2\n\nA=2\nA[0]=2\n\nA[1]=2\nA[x]=2\n', shells=['mksh'], variant='BUG')], line_number=89, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test -v with assoc arrays (line 89)
E           
E           stdout mismatch:
E             expected: 'A=1\nA[0]=1\n\nA=0\nA[0]=0\n\nA[1]=1\nA[x]=1'
E             actual:   'A=1\nA[0]=1\n\nA=1\nA[0]=1\n\nA[1]=1\nA[x]=1'
E           
E           Expected stdout: 'A=1\nA[0]=1\n\nA=0\nA[0]=0\n\nA[1]=1\nA[x]=1\n'
E           Actual stdout:   'A=1\nA[0]=1\n\nA=1\nA[0]=1\n\nA[1]=1\nA[x]=1\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -A A
E           
E           test -v A
E           echo A=$?
E           test -v 'A[0]'
E           echo "A[0]=$?"
E           echo
E           
E           A['0']=x
E           
E           test -v A
E           echo A=$?
E           test -v 'A[0]'
E           echo "A[0]=$?"
E           echo
E           
E           test -v 'A[1]'
E           echo "A[1]=$?"
E           
E           # stupid rule about undefined 'x'
E           test -v 'A[x]'
E           echo "A[x]=$?"
E           echo
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare[L97]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e7ef0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare', script="test_var1=111\nreadonly test_var2=222\nexport test_var3=333\ndeclare -n test_var4=tes...et -r test_var2\ntypeset -x test_var3\ntypeset test_var5', shells=['mksh'], variant='N-I')], line_number=97, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare (line 97)
E           
E           stdout mismatch:
E             expected: '[declare]\ntest_var1=111\ntest_var2=222\ntest_var3=333\ntest_var4=test_var1\ntest_var5=555\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x test_var3="333"\n[local]\ntest_var5=555'
E             actual:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x __readonly__="test_var2"\ndeclare -x test_var1="111"\ndeclare -x test_var2="222"\ndeclare -x test_var3="333"\ndeclare -x test_var5="555"\n[local]'
E           
E           Expected stdout: '[declare]\ntest_var1=111\ntest_var2=222\ntest_var3=333\ntest_var4=test_var1\ntest_var5=555\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x test_var3="333"\n[local]\ntest_var5=555'
E           Actual stdout:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x __readonly__="test_var2"\ndeclare -x test_var1="111"\ndeclare -x test_var2="222"\ndeclare -x test_var3="333"\ndeclare -x test_var5="555"\n[local]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           test_var1=111
E           readonly test_var2=222
E           export test_var3=333
E           declare -n test_var4=test_var1
E           f1() {
E             local test_var5=555
E             {
E               echo '[declare]'
E               declare
E               echo '[readonly]'
E               readonly
E               echo '[export]'
E               export
E               echo '[local]'
E               local
E             } | grep -E '^\[|^\b.*test_var.\b'
E           }
E           f1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare -p[L157]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1090e7fb0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare -p', script='# BUG: bash doesn\'t output flags with "local -p", which seems to contradict\n#   ...r2=222\ntypeset -x test_var3=333\ntypeset test_var5=555', shells=['mksh'], variant='N-I')], line_number=157, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -p (line 157)
E           
E           stdout mismatch:
E             expected: '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -x test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x test_var3="333"\n[local]\ntest_var5=555'
E             actual:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\n[local]'
E           
E           Expected stdout: '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -x test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x test_var3="333"\n[local]\ntest_var5=555'
E           Actual stdout:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\n[local]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # BUG: bash doesn't output flags with "local -p", which seems to contradict
E           #   with manual.
E           test_var1=111
E           readonly test_var2=222
E           export test_var3=333
E           declare -n test_var4=test_var1
E           f1() {
E             local test_var5=555
E             {
E               echo '[declare]'
E               declare -p
E               echo '[readonly]'
E               readonly -p
E               echo '[export]'
E               export -p
E               echo '[local]'
E               local -p
E             } | grep -E '^\[|^\b.*test_var.\b'
E           }
E           f1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare -p var[L264]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910c170>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare -p var', script="# BUG? bash doesn't output anything for 'local/readonly -p var', which seems t..., Assertion(type='stdout', value='[declare]\n[readonly]', shells=['mksh'], variant='N-I')], line_number=264, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -p var (line 264)
E           
E           stdout mismatch:
E             expected: '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -x test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\n[export]\n[local]'
E             actual:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\n[local]'
E           
E           Expected stdout: '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -x test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\n[export]\n[local]'
E           Actual stdout:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\n[local]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # BUG? bash doesn't output anything for 'local/readonly -p var', which seems to
E           #   contradict with manual.  Besides, 'export -p var' is not described in
E           #   manual
E           test_var1=111
E           readonly test_var2=222
E           export test_var3=333
E           declare -n test_var4=test_var1
E           f1() {
E             local test_var5=555
E             {
E               echo '[declare]'
E               declare -p test_var{0..5}
E               echo '[readonly]'
E               readonly -p test_var{0..5}
E               echo '[export]'
E               export -p test_var{0..5}
E               echo '[local]'
E               local -p test_var{0..5}
E             } | grep -E '^\[|^\b.*test_var.\b'
E           }
E           f1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare -p arr[L316]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910c230>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare -p arr', script="test_arr1=()\ndeclare -a test_arr2=()\ndeclare -A test_arr3=()\ntest_arr4=(1 2...mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=316, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -p arr (line 316)
E           
E           stdout mismatch:
E             expected: 'declare -a test_arr1=()\ndeclare -a test_arr2=()\ndeclare -A test_arr3=()\ndeclare -a test_arr4=([0]="1" [1]="2" [2]="3")\ndeclare -a test_arr5=([0]="1" [1]="2" [2]="3")\ndeclare -A test_arr6=([a]="1" [b]="2" [c]="3" )\ndeclare -a test_arr7=([3]="foo")'
E             actual:   'declare -- test_arr1=""\ndeclare -a test_arr2=()\ndeclare -A test_arr3=()\ndeclare -a test_arr4=([0]="1" [1]="2" [2]="3")\ndeclare -a test_arr5=([0]="1" [1]="2" [2]="3")\ndeclare -A test_arr6=([\'a\']="1" [\'b\']="2" [\'c\']="3")\ndeclare -a test_arr7=([3]="foo")'
E           
E           Expected stdout: 'declare -a test_arr1=()\ndeclare -a test_arr2=()\ndeclare -A test_arr3=()\ndeclare -a test_arr4=([0]="1" [1]="2" [2]="3")\ndeclare -a test_arr5=([0]="1" [1]="2" [2]="3")\ndeclare -A test_arr6=([a]="1" [b]="2" [c]="3" )\ndeclare -a test_arr7=([3]="foo")'
E           Actual stdout:   'declare -- test_arr1=""\ndeclare -a test_arr2=()\ndeclare -A test_arr3=()\ndeclare -a test_arr4=([0]="1" [1]="2" [2]="3")\ndeclare -a test_arr5=([0]="1" [1]="2" [2]="3")\ndeclare -A test_arr6=([\'a\']="1" [\'b\']="2" [\'c\']="3")\ndeclare -a test_arr7=([3]="foo")\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           test_arr1=()
E           declare -a test_arr2=()
E           declare -A test_arr3=()
E           test_arr4=(1 2 3)
E           declare -a test_arr5=(1 2 3)
E           declare -A test_arr6=(['a']=1 ['b']=2 ['c']=3)
E           test_arr7=()
E           test_arr7[3]=foo
E           declare -p test_arr{1..7}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare -pnrx[L374]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910c3b0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare -pnrx', script="test_var1=111\nreadonly test_var2=222\nexport test_var3=333\ndeclare -n test_va...ut', value='[declare -pn]\n[declare -pr]\n[declare -px]', shells=['mksh'], variant='N-I')], line_number=374, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -pnrx (line 374)
E           
E           stdout mismatch:
E             expected: '[declare -pn]\ndeclare -n test_var4="test_var1"\n[declare -pr]\ndeclare -r test_var2="222"\n[declare -px]\ndeclare -x test_var3="333"'
E             actual:   '[declare -pn]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[declare -pr]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[declare -px]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"'
E           
E           Expected stdout: '[declare -pn]\ndeclare -n test_var4="test_var1"\n[declare -pr]\ndeclare -r test_var2="222"\n[declare -px]\ndeclare -x test_var3="333"'
E           Actual stdout:   '[declare -pn]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[declare -pr]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[declare -px]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           test_var1=111
E           readonly test_var2=222
E           export test_var3=333
E           declare -n test_var4=test_var1
E           f1() {
E             local test_var5=555
E             {
E               echo '[declare -pn]'
E               declare -pn
E               echo '[declare -pr]'
E               declare -pr
E               echo '[declare -px]'
E               declare -px
E             } | grep -E '^\[|^\b.*test_var.\b'
E           }
E           f1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare -paA[L413]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910c470>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare -paA', script="declare -a test_var6=()\ndeclare -A test_var7=()\nf1() {\n  {\n    echo '[declar...mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=413, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -paA (line 413)
E           
E           stdout mismatch:
E             expected: '[declare -pa]\ndeclare -a test_var6=()\n[declare -pA]\ndeclare -A test_var7=()'
E             actual:   '[declare -pa]\ndeclare -a test_var6=()\ndeclare -A test_var7=()\n[declare -pA]\ndeclare -a test_var6=()\ndeclare -A test_var7=()'
E           
E           Expected stdout: '[declare -pa]\ndeclare -a test_var6=()\n[declare -pA]\ndeclare -A test_var7=()'
E           Actual stdout:   '[declare -pa]\ndeclare -a test_var6=()\ndeclare -A test_var7=()\n[declare -pA]\ndeclare -a test_var6=()\ndeclare -A test_var7=()\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a test_var6=()
E           declare -A test_var7=()
E           f1() {
E             {
E               echo '[declare -pa]'
E               declare -pa
E               echo '[declare -pA]'
E               declare -pA
E             } | grep -E '^\[|^\b.*test_var.\b'
E           }
E           f1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::ble.sh: eval -- "$(declare -p var arr)"[L529]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910c770>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='ble.sh: eval -- "$(declare -p var arr)"', script='# This illustrates an example usage of "eval & declar...mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=529, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ble.sh: eval -- "$(declare -p var arr)" (line 529)
E           
E           stdout mismatch:
E             expected: 'sum=21\narr[0]=a2\narr[1]=a5\narr[2]=a8\narr[3]=a10'
E             actual:   'sum=0'
E           
E           Expected stdout: 'sum=21\narr[0]=a2\narr[1]=a5\narr[2]=a8\narr[3]=a10'
E           Actual stdout:   'sum=0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: --: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This illustrates an example usage of "eval & declare" for exporting
E           # multiple variables from $().
E           eval -- "$(
E             printf '%s\n' a{1..10} | {
E               sum=0 i=0 arr=()
E               while read line; do
E                 ((sum+=${#line},i++))
E                 arr[$((i/3))]=$line
E               done
E               declare -p sum arr
E             })"
E           echo sum=$sum
E           for ((i=0;i<${#arr[@]};i++)); do
E             echo "arr[$i]=${arr[i]}"
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare -p and value.Undef[L555]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910c830>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare -p and value.Undef', script="# This is a regression for a crash\n# But actually there is also a...ksh'], variant='N-I'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=555, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -p and value.Undef (line 555)
E           
E           stdout mismatch:
E             expected: 'declare -- x\ndeclare -- x'
E             actual:   'declare -- x=""'
E           
E           Expected stdout: 'declare -- x\ndeclare -- x'
E           Actual stdout:   'declare -- x=""\n'
E           Expected stderr: None
E           Actual stderr:   'bash: declare: x: not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is a regression for a crash
E           # But actually there is also an incompatibility -- we don't print anything
E           
E           declare x
E           declare -p x
E           
E           function f { local x; declare -p x; }
E           x=1
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::eval -- "$(declare -p arr)" (restore arrays w/ unset elements)[L575]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910c8f0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='eval -- "$(declare -p arr)" (restore arrays w/ unset elements)', script='arr=(1 2 3)\neval -- "$(arr=()...mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=575, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: eval -- "$(declare -p arr)" (restore arrays w/ unset elements) (line 575)
E           
E           stdout mismatch:
E             expected: 'arr[0]: unset\narr[1]: unset\narr[2]: unset\narr[3]: set ... []\narr[4]: set ... [foo]'
E             actual:   'arr[0]: unset\narr[1]: unset\narr[2]: unset\narr[3]: unset\narr[4]: unset'
E           
E           Expected stdout: 'arr[0]: unset\narr[1]: unset\narr[2]: unset\narr[3]: set ... []\narr[4]: set ... [foo]'
E           Actual stdout:   'arr[0]: unset\narr[1]: unset\narr[2]: unset\narr[3]: unset\narr[4]: unset\n'
E           Expected stderr: None
E           Actual stderr:   'bash: --: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           arr=(1 2 3)
E           eval -- "$(arr=(); arr[3]= arr[4]=foo; declare -p arr)"
E           for i in {0..4}; do
E             echo "arr[$i]: ${arr[$i]+set ... [}${arr[$i]-unset}${arr[$i]+]}"
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare -p UNDEF (and typeset) -- prints something to stderr[L591]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910c9b0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare -p UNDEF (and typeset) -- prints something to stderr', script="x=42\nreadonly x\nexport x\n\nde...out', value='typeset -x -r x=42\n 1 de\n 0 ty\n 1 total', shells=['mksh'], variant='N-I')], line_number=591, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -p UNDEF (and typeset) -- prints something to stderr (line 591)
E           
E           stdout mismatch:
E             expected: 'declare -rx x="42"\ndeclare -rx x="42"\n  2 de\n  2 ty\n  4 total'
E             actual:   'declare -r x="42"\ndeclare -r x="42"\n      0 de\n      0 ty\n      0 total'
E           
E           Expected stdout: 'declare -rx x="42"\ndeclare -rx x="42"\n  2 de\n  2 ty\n  4 total'
E           Actual stdout:   'declare -r x="42"\ndeclare -r x="42"\n      0 de\n      0 ty\n      0 total\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=42
E           readonly x
E           export x
E           
E           declare -p x undef1 undef2 2> de
E           
E           typeset -p x undef1 undef2 2> ty
E           
E           # readonly -p and export -p don't accept args!  They only print all
E           #
E           # These do not accept args
E           # readonly -p x undef1 undef2 2> re
E           # export -p x undef1 undef2 2> ex
E           
E           f() {
E             # it behaves weird with x
E             #local -p undef1 undef2 2>lo
E             local -p a b b>lo
E             #local -p x undef1 undef2 2> lo
E           }
E           # local behaves differently in bash 4.4 and bash 5, not specifying now
E           # f
E           # files='de ty lo'
E           
E           files='de ty'
E           
E           wc -l $files
E           #cat $files
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::typeset -r makes a string readonly[L688]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910cbf0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='typeset -r makes a string readonly', script="typeset -r s1='12'\ntypeset -r s2='34'\n\ns1='c'\necho sta...atus=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1', shells=['bash'], variant='OK')], line_number=688, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: typeset -r makes a string readonly (line 688)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1'
E             actual:   'status=0\nstatus=0\nstatus=0\nstatus=0\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=0\nstatus=0\nstatus=0\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: unset: s1: cannot unset: readonly variable\nbash: unset: s2: cannot unset: readonly variable\n'
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -r s1='12'
E           typeset -r s2='34'
E           
E           s1='c'
E           echo status=$?
E           s2='d'
E           echo status=$?
E           
E           s1+='e'
E           echo status=$?
E           s2+='f'
E           echo status=$?
E           
E           unset s1
E           echo status=$?
E           unset s2
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::typeset -ar makes it readonly[L720]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910ccb0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='typeset -ar makes it readonly', script="typeset -a -r array1=(1 2)\ntypeset -ar array2=(3 4)\n\narray1=..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=720, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: typeset -ar makes it readonly (line 720)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1'
E             actual:   'status=0\nstatus=0\nstatus=0\nstatus=0\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=0\nstatus=0\nstatus=0\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: unset: array1: cannot unset: readonly variable\nbash: unset: array2: cannot unset: readonly variable\n'
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -a -r array1=(1 2)
E           typeset -ar array2=(3 4)
E           
E           array1=('c')
E           echo status=$?
E           array2=('d')
E           echo status=$?
E           
E           array1+=('e')
E           echo status=$?
E           array2+=('f')
E           echo status=$?
E           
E           unset array1
E           echo status=$?
E           unset array2
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::Multiple assignments / array assignments on a line[L760]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910ce30>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='Multiple assignments / array assignments on a line', script='a=1 b[0+0]=2 c=3\necho $a ${b[@]} $c', assertions=[Assertion(type='stdout', value='1 2 3', shells=None, variant=None)], line_number=760, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple assignments / array assignments on a line (line 760)
E           
E           stdout mismatch:
E             expected: '1 2 3'
E             actual:   '1 3'
E           
E           Expected stdout: '1 2 3'
E           Actual stdout:   '1 3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=1 b[0+0]=2 c=3
E           echo $a ${b[@]} $c
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::syntax error in array assignment[L782]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910cfb0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='syntax error in array assignment', script='a=x b[0+]=y c=z\necho $a $b $c', assertions=[Assertion(type=...['mksh'], variant='OK'), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=782, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: syntax error in array assignment (line 782)
E           
E           stdout mismatch:
E             expected: 'x'
E             actual:   'x z'
E           
E           Expected stdout: 'x'
E           Actual stdout:   'x z\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           a=x b[0+]=y c=z
E           echo $a $b $c
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::declare -g (bash-specific; bash-completion uses it)[L794]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910d070>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='declare -g (bash-specific; bash-completion uses it)', script='f() {\n  declare -g G=42\n  declare L=99\...mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=794, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -g (bash-specific; bash-completion uses it) (line 794)
E           
E           stdout mismatch:
E             expected: "['42', '']\n['bar', '']\n['ev2']"
E             actual:   "['42', '99']\n['bar', 'Eggs']\n['ev2']"
E           
E           Expected stdout: "['42', '']\n['bar', '']\n['ev2']"
E           Actual stdout:   "['42', '99']\n['bar', 'Eggs']\n['ev2']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             declare -g G=42
E             declare L=99
E           
E             declare -Ag dict
E             dict["foo"]=bar
E           
E             declare -A localdict
E             localdict["spam"]=Eggs
E           
E             # For bash-completion
E             eval 'declare -Ag ev'
E             ev["ev1"]=ev2
E           }
E           f
E           argv.py "$G" "$L"
E           argv.py "${dict["foo"]}" "${localdict["spam"]}"
E           argv.py "${ev["ev1"]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::dynamic flag in array in assign builtin[L854]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910d2b0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='dynamic flag in array in assign builtin', script='typeset b\nb=(unused1 unused2)  # this works in mksh\...sertions=[Assertion(type='stdout', value='foo=F\nbar=B\nF\nB', shells=None, variant=None)], line_number=854, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dynamic flag in array in assign builtin (line 854)
E           
E           stdout mismatch:
E             expected: 'foo=F\nbar=B\nF\nB'
E             actual:   'foo=\nbar=\nNone\nNone'
E           
E           Expected stdout: 'foo=F\nbar=B\nF\nB'
E           Actual stdout:   'foo=\nbar=\nNone\nNone\n'
E           Expected stderr: None
E           Actual stderr:   'bash: declare: - : invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset b
E           b=(unused1 unused2)  # this works in mksh
E           
E           a=(x 'foo=F' 'bar=B')
E           typeset -"${a[@]}"
E           echo foo=$foo
E           echo bar=$bar
E           printenv.py foo
E           printenv.py bar
E           
E           # syntax error in mksh!  But works in bash and zsh.
E           #typeset -"${a[@]}" b=(spam eggs)
E           #echo "length of b = ${#b[@]}"
E           #echo "b[0]=${b[0]}"
E           #echo "b[1]=${b[1]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::typeset +x[L878]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910d370>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='typeset +x', script='export e=E\nprintenv.py e\ntypeset +x e=E2\nprintenv.py e  # no longer exported', assertions=[Assertion(type='stdout', value='E\nNone', shells=None, variant=None)], line_number=878, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: typeset +x (line 878)
E           
E           stdout mismatch:
E             expected: 'E\nNone'
E             actual:   'E\nE2'
E           
E           Expected stdout: 'E\nNone'
E           Actual stdout:   'E\nE2\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `+x': not a valid identifier\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           export e=E
E           printenv.py e
E           typeset +x e=E2
E           printenv.py e  # no longer exported
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::typeset +r removes read-only attribute (TODO: documented in bash to do nothing)[L888]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910d430>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='typeset +r removes read-only attribute (TODO: documented in bash to do nothing)', script="readonly r=r1...='OK'), Assertion(type='stdout', value='r=r1\nr=r1\nr=r1', shells=['bash'], variant='OK')], line_number=888, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: typeset +r removes read-only attribute (TODO: documented in bash to do nothing) (line 888)
E           
E           stdout mismatch:
E             expected: 'r=r1'
E             actual:   'r=r1\nr=r2\nr=r3'
E           
E           Expected stdout: 'r=r1'
E           Actual stdout:   'r=r1\nr=r2\nr=r3\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `+r': not a valid identifier\n"
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           readonly r=r1
E           echo r=$r
E           
E           # clear the readonly flag.  Why is this accepted in bash, but doesn't do
E           # anything?
E           typeset +r r=r2
E           echo r=$r
E           
E           r=r3
E           echo r=$r
E           
E           
E           # mksh doesn't allow you to unset
E           # just-bash treats readonly assignment as fatal
E           
E           # bash doesn't allow you to unset
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::function name with /[L927]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910d4f0>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='function name with /', script='ble/foo() { echo hi; }\ndeclare -F ble/foo\necho status=$?', assertions=...['ash'], variant='N-I'), Assertion(type='status', value=2, shells=['ash'], variant='N-I')], line_number=927, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: function name with / (line 927)
E           
E           stdout mismatch:
E             expected: 'ble/foo\nstatus=0'
E             actual:   'declare -f ble/foo\nstatus=0'
E           
E           Expected stdout: 'ble/foo\nstatus=0'
E           Actual stdout:   'declare -f ble/foo\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           ble/foo() { echo hi; }
E           declare -F ble/foo
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign-extended.test.sh::unset and shell funcs[L945]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910d670>
test_file = 'assign-extended.test.sh'
test_case = TestCase(name='unset and shell funcs', script='foo() {\n  echo bar\n}\n\nfoo\n\ndeclare -F\nunset foo\ndeclare -F\n\nf...ariant='N-I'), Assertion(type='stdout', value='bar\nbar', shells=['mksh'], variant='N-I')], line_number=945, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unset and shell funcs (line 945)
E           
E           stdout mismatch:
E             expected: 'bar\ndeclare -f foo'
E             actual:   'bar\ndeclare -f foo\ndeclare -f foo\nbar'
E           status mismatch: expected 127, got 0
E           
E           Expected stdout: 'bar\ndeclare -f foo'
E           Actual stdout:   'bar\ndeclare -f foo\ndeclare -f foo\nbar\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 127
E           Actual status:   0
E           
E           Script:
E           ---
E           foo() {
E             echo bar
E           }
E           
E           foo
E           
E           declare -F
E           unset foo
E           declare -F
E           
E           foo
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::Escaped = in command name[L113]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910dd30>
test_file = 'assign.test.sh'
test_case = TestCase(name='Escaped = in command name', script="# foo=bar is in the 'spec/bin' dir.\nfoo\\=bar", assertions=[Assertion(type='stdout', value='HI', shells=None, variant=None)], line_number=113, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Escaped = in command name (line 113)
E           
E           stdout mismatch:
E             expected: 'HI'
E             actual:   ''
E           
E           Expected stdout: 'HI'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # foo=bar is in the 'spec/bin' dir.
E           foo\=bar
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::Env binding not allowed before compound command[L119]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910ddf0>
test_file = 'assign.test.sh'
test_case = TestCase(name='Env binding not allowed before compound command', script="# bash gives exit code 2 for syntax error, be...e, variant=None), Assertion(type='status', value=1, shells=['mksh', 'zsh'], variant='OK')], line_number=119, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Env binding not allowed before compound command (line 119)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 4, column 23
E           
E           
E           Script:
E           ---
E           # bash gives exit code 2 for syntax error, because of 'do'.
E           # dash gives 0 because there is stuff after for?  Should really give an error.
E           # mksh gives acceptable error of 1.
E           FOO=bar for i in a b; do printenv.py $FOO; done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::Trying to run keyword 'for'[L127]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910deb0>
test_file = 'assign.test.sh'
test_case = TestCase(name="Trying to run keyword 'for'", script='FOO=bar for', assertions=[Assertion(type='status', value=127, shells=None, variant=None), Assertion(type='status', value=1, shells=['zsh'], variant='OK')], line_number=127, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Trying to run keyword 'for' (line 127)
E           
E           status mismatch: expected 127, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 127
E           Actual status:   0
E           
E           Script:
E           ---
E           FOO=bar for
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::Env binding in readonly/declare is NOT exported!  (pitfall)[L148]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910e1b0>
test_file = 'assign.test.sh'
test_case = TestCase(name='Env binding in readonly/declare is NOT exported!  (pitfall)', script='# All shells agree on this, but i...iant=None), Assertion(type='stdout', value='v=None\nv2=', shells=['bash'], variant='BUG')], line_number=148, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Env binding in readonly/declare is NOT exported!  (pitfall) (line 148)
E           
E           stdout mismatch:
E             expected: 'v=None\nv2='
E             actual:   'v=foo\nv2=foo'
E           
E           Expected stdout: 'v=None\nv2='
E           Actual stdout:   'v=foo\nv2=foo\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # All shells agree on this, but it's very confusing behavior.
E           FOO=foo readonly v=$(printenv.py FOO)
E           echo "v=$v"
E           
E           # bash has probems here:
E           FOO=foo readonly v2=$FOO
E           echo "v2=$v2"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::'local x' does not set variable[L205]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910e4b0>
test_file = 'assign.test.sh'
test_case = TestCase(name="'local x' does not set variable", script='set -o nounset\nf() {\n  local x\n  echo $x\n}\nf', assertion...['dash'], variant='OK'), Assertion(type='status', value=0, shells=['zsh'], variant='BUG')], line_number=205, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 'local x' does not set variable (line 205)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o nounset
E           f() {
E             local x
E             echo $x
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::'local -a x' does not set variable[L216]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910e570>
test_file = 'assign.test.sh'
test_case = TestCase(name="'local -a x' does not set variable", script='set -o nounset\nf() {\n  local -a x\n  echo $x\n}\nf', ass...['dash'], variant='OK'), Assertion(type='status', value=0, shells=['zsh'], variant='BUG')], line_number=216, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 'local -a x' does not set variable (line 216)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o nounset
E           f() {
E             local -a x
E             echo $x
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::'declare -A' and then dict assignment[L240]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910e6f0>
test_file = 'assign.test.sh'
test_case = TestCase(name="'declare -A' and then dict assignment", script='declare -A foo\nkey=bar\nfoo["$key"]=value\necho ${foo[..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=240, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 'declare -A' and then dict assignment (line 240)
E           
E           stdout mismatch:
E             expected: 'value'
E             actual:   ''
E           
E           Expected stdout: 'value'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A foo
E           key=bar
E           foo["$key"]=value
E           echo ${foo["bar"]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::Reveal existence of "temp frame" (All shells disagree here!!!)[L281]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910e930>
test_file = 'assign.test.sh'
test_case = TestCase(name='Reveal existence of "temp frame" (All shells disagree here!!!)', script='f() {\n  echo "x=$x"\n\n  x=mu...nx=temp-binding\nx=mutated-temp\nx=mutated-temp\nx=\nx=', shells=['yash'], variant='BUG')], line_number=281, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Reveal existence of "temp frame" (All shells disagree here!!!) (line 281)
E           
E           stdout mismatch:
E             expected: 'x=temp-binding\nx=mutated-temp\nx=local\nx=global\nx=global'
E             actual:   'x=temp-binding\nx=mutated-temp\nx=local\nx=\nx=global'
E           
E           Expected stdout: 'x=temp-binding\nx=mutated-temp\nx=local\nx=global\nx=global'
E           Actual stdout:   'x=temp-binding\nx=mutated-temp\nx=local\nx=\nx=global\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo "x=$x"
E           
E             x=mutated-temp  # mutate temp frame
E             echo "x=$x"
E           
E             # Declare a new local
E             local x='local'
E             echo "x=$x"
E           
E             # Unset it
E             unset x
E             echo "x=$x"
E           }
E           
E           x=global
E           x=temp-binding f
E           echo "x=$x"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::Test above without 'local' (which is not POSIX)[L338]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910e9f0>
test_file = 'assign.test.sh'
test_case = TestCase(name="Test above without 'local' (which is not POSIX)", script='f() {\n  echo "x=$x"\n\n  x=mutated-temp  # m...', value='x=temp-binding\nx=mutated-temp\nx=global\nx=global', shells=None, variant=None)], line_number=338, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Test above without 'local' (which is not POSIX) (line 338)
E           
E           stdout mismatch:
E             expected: 'x=temp-binding\nx=mutated-temp\nx=global\nx=global'
E             actual:   'x=temp-binding\nx=mutated-temp\nx=\nx=global'
E           
E           Expected stdout: 'x=temp-binding\nx=mutated-temp\nx=global\nx=global'
E           Actual stdout:   'x=temp-binding\nx=mutated-temp\nx=\nx=global\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo "x=$x"
E           
E             x=mutated-temp  # mutate temp frame
E             echo "x=$x"
E           
E             # Unset it
E             unset x
E             echo "x=$x"
E           }
E           
E           x=global
E           x=temp-binding f
E           echo "x=$x"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::Using ${x-default} after unsetting a temp binding shadowing a global[L397]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910eb70>
test_file = 'assign.test.sh'
test_case = TestCase(name='Using ${x-default} after unsetting a temp binding shadowing a global', script='f() {\n  echo "x=$x"\n  ...ing\nx=local\n- operator = global\n:- operator = global', shells=['bash'], variant='BUG')], line_number=397, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Using ${x-default} after unsetting a temp binding shadowing a global (line 397)
E           
E           stdout mismatch:
E             expected: 'x=temp-binding\nx=local\n- operator = global\n:- operator = global'
E             actual:   'x=temp-binding\nx=local\n- operator = default\n:- operator = default'
E           
E           Expected stdout: 'x=temp-binding\nx=local\n- operator = global\n:- operator = global'
E           Actual stdout:   'x=temp-binding\nx=local\n- operator = default\n:- operator = default\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo "x=$x"
E             local x='local'
E             echo "x=$x"
E             unset x
E             echo "- operator = ${x-default}"
E             echo ":- operator = ${x:-default}"
E           }
E           x=global
E           x=temp-binding f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::static assignment doesn't split[L427]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910ec30>
test_file = 'assign.test.sh'
test_case = TestCase(name="static assignment doesn't split", script='words=\'a b c\'\nexport ex=$words\nglo=$words\nreadonly ro=$w...e), Assertion(type='stdout', value="['a', 'a b c', 'a']", shells=['dash'], variant='BUG')], line_number=427, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: static assignment doesn't split (line 427)
E           
E           stdout mismatch:
E             expected: "['a b c', 'a b c', 'a b c']"
E             actual:   "['a', 'a b c', 'a']"
E           
E           Expected stdout: "['a b c', 'a b c', 'a b c']"
E           Actual stdout:   "['a', 'a b c', 'a']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           words='a b c'
E           export ex=$words
E           glo=$words
E           readonly ro=$words
E           argv.py "$ex" "$glo" "$ro"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::aliased assignment doesn't split[L442]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910ecf0>
test_file = 'assign.test.sh'
test_case = TestCase(name="aliased assignment doesn't split", script='shopt -s expand_aliases || true\nwords=\'a b c\'\nalias e=ex...t='BUG'), Assertion(type='stdout', value="['a b c', 'a b c']", shells=None, variant=None)], line_number=442, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: aliased assignment doesn't split (line 442)
E           
E           stdout mismatch:
E             expected: "['a b c', 'a b c']"
E             actual:   "['a', 'a']"
E           
E           Expected stdout: "['a b c', 'a b c']"
E           Actual stdout:   "['a', 'a']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases || true
E           words='a b c'
E           alias e=export
E           alias r=readonly
E           e ex=$words
E           r ro=$words
E           argv.py "$ex" "$ro"
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[assign.test.sh::assign and glob[L505]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910ef30>
test_file = 'assign.test.sh'
test_case = TestCase(name='assign and glob', script='cd $TMP\ntouch foo=a foo=b\nfoo=*\nargv.py "$foo"\nunset foo\n\nexport foo=*\...ant=None), Assertion(type='stdout', value="['*']\n['b']", shells=['dash'], variant='BUG')], line_number=505, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assign and glob (line 505)
E           
E           stdout mismatch:
E             expected: "['*']\n['*']"
E             actual:   "['*']\n['b']"
E           
E           Expected stdout: "['*']\n['*']"
E           Actual stdout:   "['*']\n['b']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd $TMP
E           touch foo=a foo=b
E           foo=*
E           argv.py "$foo"
E           unset foo
E           
E           export foo=*
E           argv.py "$foo"
E           unset foo
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[assign.test.sh::declare and glob[L525]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910eff0>
test_file = 'assign.test.sh'
test_case = TestCase(name='declare and glob', script='cd $TMP\ntouch foo=a foo=b\ntypeset foo=*\nargv.py "$foo"\nunset foo', asser...ne, variant=None), Assertion(type='stdout', value="['']", shells=['dash'], variant='N-I')], line_number=525, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare and glob (line 525)
E           
E           stdout mismatch:
E             expected: "['*']"
E             actual:   "['b']"
E           
E           Expected stdout: "['*']"
E           Actual stdout:   "['b']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd $TMP
E           touch foo=a foo=b
E           typeset foo=*
E           argv.py "$foo"
E           unset foo
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::readonly $x where x='b c'[L538]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910f0b0>
test_file = 'assign.test.sh'
test_case = TestCase(name="readonly $x where x='b c'", script="one=a\ntwo='b c'\nreadonly $two $one\na=new\necho status=$?\nb=new\...rtion(type='stdout', value='status=1\nstatus=1\nstatus=1', shells=['bash'], variant='OK')], line_number=538, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: readonly $x where x='b c' (line 538)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1\nstatus=1'
E             actual:   'status=0\nstatus=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           one=a
E           two='b c'
E           readonly $two $one
E           a=new
E           echo status=$?
E           b=new
E           echo status=$?
E           c=new
E           echo status=$?
E           
E           # in OSH and zsh, this is an invalid variable name
E           
E           # most shells make two variable read-only
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::readonly a=(1 2) no_value c=(3 4) makes 'no_value' readonly[L563]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910f170>
test_file = 'assign.test.sh'
test_case = TestCase(name="readonly a=(1 2) no_value c=(3 4) makes 'no_value' readonly", script='readonly a=(1 2) no_value c=(3 4)...lls=None, variant=None), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=563, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: readonly a=(1 2) no_value c=(3 4) makes 'no_value' readonly (line 563)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           readonly a=(1 2) no_value c=(3 4)
E           no_value=x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::redirect after assignment builtin (eval redirects after evaluating arguments)[L596]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910f3b0>
test_file = 'assign.test.sh'
test_case = TestCase(name='redirect after assignment builtin (eval redirects after evaluating arguments)', script='# See also: spe...ne, variant=None), Assertion(type='stderr-json', value='', shells=['zsh'], variant='BUG')], line_number=596, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: redirect after assignment builtin (eval redirects after evaluating arguments) (line 596)
E           
E           stderr mismatch:
E             expected: 'STDERR'
E             actual:   ''
E           
E           Expected stdout: 'done'
E           Actual stdout:   'done\n'
E           Expected stderr: 'STDERR'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # See also: spec/redir-order.test.sh (#2307)
E           # The $(stdout_stderr.py) is evaluated *before* the 2>/dev/null redirection
E           
E           readonly x=$(stdout_stderr.py) 2>/dev/null
E           echo done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::redirect after command sub (like case above but without assignment builtin)[L610]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910f470>
test_file = 'assign.test.sh'
test_case = TestCase(name='redirect after command sub (like case above but without assignment builtin)', script='echo stdout=$(std...=None, variant=None), Assertion(type='stderr', value='STDERR', shells=None, variant=None)], line_number=610, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: redirect after command sub (like case above but without assignment builtin) (line 610)
E           
E           stderr mismatch:
E             expected: 'STDERR'
E             actual:   ''
E           
E           Expected stdout: 'stdout=STDOUT'
E           Actual stdout:   'stdout=STDOUT\n'
E           Expected stderr: 'STDERR'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo stdout=$(stdout_stderr.py) 2>/dev/null
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::redirect after bare assignment[L619]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910f530>
test_file = 'assign.test.sh'
test_case = TestCase(name='redirect after bare assignment', script='x=$(stdout_stderr.py) 2>/dev/null\necho done', assertions=[Ass..., variant=None), Assertion(type='stderr', value='STDERR', shells=['bash'], variant='BUG')], line_number=619, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: redirect after bare assignment (line 619)
E           
E           stderr mismatch:
E             expected: 'STDERR'
E             actual:   ''
E           
E           Expected stdout: 'done'
E           Actual stdout:   'done\n'
E           Expected stderr: 'STDERR'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=$(stdout_stderr.py) 2>/dev/null
E           echo done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[assign.test.sh::readonly array should not be modified by a+=(1)[L751]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910f9b0>
test_file = 'assign.test.sh'
test_case = TestCase(name='readonly array should not be modified by a+=(1)', script='case $SH in dash) exit 99 ;; esac # dash/mksh..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=751, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: readonly array should not be modified by a+=(1) (line 751)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   "['1', '2', '3', '4']\n['1', '2', '3', '4', '4']"
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   "['1', '2', '3', '4']\n['1', '2', '3', '4', '4']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit 99 ;; esac # dash/mksh does not support associative arrays
E           
E           a=(1 2 3)
E           readonly -a a
E           eval 'a+=(4)'
E           argv.py "${a[@]}"
E           eval 'declare -n r=a; r+=(4)'
E           argv.py "${a[@]}"
E           
E           # just-bash treats readonly assignment as fatal (matches mksh)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-features.test.sh::[bash_unset] local-unset / dynamic-unset for localvar[L3]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910fa70>
test_file = 'ble-features.test.sh'
test_case = TestCase(name='[bash_unset] local-unset / dynamic-unset for localvar', script='unlocal() { unset -v "$1"; }\n\nf1() {\... global\n[global,local,(unlocal)] v: global', shells=['osh', 'mksh', 'yash'], variant='OK')], line_number=3, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] local-unset / dynamic-unset for localvar (line 3)
E           
E           Execution error: Expected ')' after '(' in function definition at line 24, column 16
E           
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f1() {
E             local v=local
E             unset v
E             echo "[$1,local,(unset)] v: ${v-(unset)}"
E           }
E           v=global
E           f1 global
E           
E           f1() {
E             local v=local
E             unlocal v
E             echo "[$1,local,(unlocal)] v: ${v-(unset)}"
E           }
E           v=global
E           f1 'global'
E           
E           
E           
E           # always-value-unset
E           #   local-unset   = value-unset
E           #   dynamic-unset = value-unset
E           [global,local,(unset)] v: (unset)
E           [global,local,(unlocal)] v: (unset)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-features.test.sh::[bash_unset] local-unset / dynamic-unset for localvar (mutated from tempenv)[L47]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910fb30>
test_file = 'ble-features.test.sh'
test_case = TestCase(name='[bash_unset] local-unset / dynamic-unset for localvar (mutated from tempenv)', script='unlocal() { unse...global,tempenv,local,(unlocal)] v: tempenv', shells=['osh', 'mksh', 'yash'], variant='OK')], line_number=47, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] local-unset / dynamic-unset for localvar (mutated from tempenv) (line 47)
E           
E           Execution error: Expected ')' after '(' in function definition at line 28, column 24
E           
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f1() {
E             local v=local
E             unset v
E             echo "[$1,local,(unset)] v: ${v-(unset)}"
E           }
E           v=global
E           v=tempenv f1 'global,tempenv'
E           
E           f1() {
E             local v=local
E             unlocal v
E             echo "[$1,local,(unlocal)] v: ${v-(unset)}"
E           }
E           v=global
E           v=tempenv f1 'global,tempenv'
E           
E           
E           # Note on bug in bash 4.3 to bash 5.0
E           # [global,tempenv,local,(unset)] v: global
E           # [global,tempenv,local,(unlocal)] v: global
E           
E           
E           # always-value-unset
E           #   local-unset   = value-unset
E           #   dynamic-unset = value-unset
E           [global,tempenv,local,(unset)] v: (unset)
E           [global,tempenv,local,(unlocal)] v: (unset)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-features.test.sh::[bash_unset] local-unset / dynamic-unset for tempenv[L95]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910fbf0>
test_file = 'ble-features.test.sh'
test_case = TestCase(name='[bash_unset] local-unset / dynamic-unset for tempenv', script='unlocal() { unset -v "$1"; }\n\nf1() {\n...lobal,tempenv,(unlocal)] v: (unset)', shells=['zsh', 'ash', 'dash', 'mksh'], variant='OK')], line_number=95, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] local-unset / dynamic-unset for tempenv (line 95)
E           
E           stdout mismatch:
E             expected: '# always-cell-unset, bash-unset\n#   local-unset   = cell-unset\n#   dynamic-unset = cell-unset\n[global,tempenv,(unset)] v: global\n[global,tempenv,(unlocal)] v: global'
E             actual:   '[global,tempenv,(unset)] v: (unset)\n[global,tempenv,(unlocal)] v: (unset)'
E           
E           Expected stdout: '# always-cell-unset, bash-unset\n#   local-unset   = cell-unset\n#   dynamic-unset = cell-unset\n[global,tempenv,(unset)] v: global\n[global,tempenv,(unlocal)] v: global'
E           Actual stdout:   '[global,tempenv,(unset)] v: (unset)\n[global,tempenv,(unlocal)] v: (unset)\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f1() {
E             unset v
E             echo "[$1,(unset)] v: ${v-(unset)}"
E           }
E           v=global
E           v=tempenv f1 'global,tempenv'
E           
E           f1() {
E             unlocal v
E             echo "[$1,(unlocal)] v: ${v-(unset)}"
E           }
E           v=global
E           v=tempenv f1 'global,tempenv'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-features.test.sh::[bash_unset] function call with tempenv vs tempenv-eval[L128]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910fcb0>
test_file = 'ble-features.test.sh'
test_case = TestCase(name='[bash_unset] function call with tempenv vs tempenv-eval', script='unlocal() { unset -v "$1"; }\n\nf5() ...,tempenv,(eval),local+unlocal] v: tempenv', shells=['osh', 'yash', 'mksh'], variant='OK')], line_number=128, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] function call with tempenv vs tempenv-eval (line 128)
E           
E           Execution error: Expected ')' after '(' in function definition at line 41, column 18
E           
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f5() {
E             echo "[$1] v: ${v-(unset)}"
E             local v
E             echo "[$1,local] v: ${v-(unset)}"
E             ( unset v
E               echo "[$1,local+unset] v: ${v-(unset)}" )
E             ( unlocal v
E               echo "[$1,local+unlocal] v: ${v-(unset)}" )
E           }
E           v=global
E           f5 'global'
E           v=tempenv f5 'global,tempenv'
E           v=tempenv eval 'f5 "global,tempenv,(eval)"'
E           
E           
E           # Note on bug in bash 4.3 to bash 5.0
E           # [global] v: global
E           # [global,local] v: (unset)
E           # [global,local+unset] v: (unset)
E           # [global,local+unlocal] v: global
E           # [global,tempenv] v: tempenv
E           # [global,tempenv,local] v: tempenv
E           # [global,tempenv,local+unset] v: global
E           # [global,tempenv,local+unlocal] v: global
E           # [global,tempenv,(eval)] v: tempenv
E           # [global,tempenv,(eval),local] v: tempenv
E           # [global,tempenv,(eval),local+unset] v: (unset)
E           # [global,tempenv,(eval),local+unlocal] v: tempenv
E           
E           # always-value-unset x init.unset
E           [global] v: global
E           [global,local] v: (unset)
E           [global,local+unset] v: (unset)
E           [global,local+unlocal] v: (unset)
E           [global,tempenv] v: tempenv
E           [global,tempenv,local] v: tempenv
E           [global,tempenv,local+unset] v: (unset)
E           [global,tempenv,local+unlocal] v: (unset)
E           [global,tempenv,(eval)] v: tempenv
E           [global,tempenv,(eval),local] v: (unset)
E           [global,tempenv,(eval),local+unset] v: (unset)
E           [global,tempenv,(eval),local+unlocal] v: (unset)
E           
E           # always-value-unset x init.empty
E           [global] v: global
E           [global,local] v: 
E           [global,local+unset] v: (unset)
E           [global,local+unlocal] v: (unset)
E           [global,tempenv] v: tempenv
E           [global,tempenv,local] v: 
E           [global,tempenv,local+unset] v: (unset)
E           [global,tempenv,local+unlocal] v: (unset)
E           [global,tempenv,(eval)] v: tempenv
E           [global,tempenv,(eval),local] v: 
E           [global,tempenv,(eval),local+unset] v: (unset)
E           [global,tempenv,(eval),local+unlocal] v: (unset)
E           
E           # always-value-unset x init.inherit
E           [global] v: global
E           [global,local] v: global
E           [global,local+unset] v: (unset)
E           [global,local+unlocal] v: (unset)
E           [global,tempenv] v: tempenv
E           [global,tempenv,local] v: tempenv
E           [global,tempenv,local+unset] v: (unset)
E           [global,tempenv,local+unlocal] v: (unset)
E           [global,tempenv,(eval)] v: tempenv
E           [global,tempenv,(eval),local] v: tempenv
E           [global,tempenv,(eval),local+unset] v: (unset)
E           [global,tempenv,(eval),local+unlocal] v: (unset)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-features.test.sh::[bash_unset] localvar-inherit from tempenv[L241]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910fd70>
test_file = 'ble-features.test.sh'
test_case = TestCase(name='[bash_unset] localvar-inherit from tempenv', script='f1() {\n  local v\n  echo "[$1,(local)] v: ${v-(un...n[global,local,(func),(local)] v: (unset)', shells=['osh', 'mksh', 'yash'], variant='OK')], line_number=241, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] localvar-inherit from tempenv (line 241)
E           
E           Execution error: Expected ')' after '(' in function definition at line 27, column 10
E           
E           
E           Script:
E           ---
E           f1() {
E             local v
E             echo "[$1,(local)] v: ${v-(unset)}"
E           }
E           f2() {
E             f1 "$1,(func)"
E           }
E           f3() {
E             local v=local
E             f1 "$1,local,(func)"
E           }
E           v=global
E           
E           f1 'global'
E           v=tempenv f1 'global,tempenv'
E           (export v=global; f1 'xglobal')
E           
E           f2 'global'
E           v=tempenv f2 'global,tempenv'
E           (export v=global; f2 'xglobal')
E           
E           f3 'global'
E           
E           
E           
E           # init.unset x tempenv-in-localctx
E           [global,(local)] v: (unset)
E           [global,tempenv,(local)] v: tempenv
E           [xglobal,(local)] v: (unset)
E           [global,(func),(local)] v: (unset)
E           [global,tempenv,(func),(local)] v: (unset)
E           [xglobal,(func),(local)] v: (unset)
E           [global,local,(func),(local)] v: (unset)
E           
E           # init.empty
E           [global,(local)] v: 
E           [global,tempenv,(local)] v: 
E           [xglobal,(local)] v: 
E           [global,(func),(local)] v: 
E           [global,tempenv,(func),(local)] v: 
E           [xglobal,(func),(local)] v: 
E           [global,local,(func),(local)] v: 
E           
E           # init.inherit
E           [global,(local)] v: global
E           [global,tempenv,(local)] v: tempenv
E           [xglobal,(local)] v: global
E           [global,(func),(local)] v: global
E           [global,tempenv,(func),(local)] v: tempenv
E           [xglobal,(func),(local)] v: global
E           [global,local,(func),(local)] v: local
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-features.test.sh::[compat_array] scalar write to arrays[L336]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910fef0>
test_file = 'ble-features.test.sh'
test_case = TestCase(name='[compat_array] scalar write to arrays', script='case ${SH##*/} in\n(dash|ash) exit 1;; # dash/ash does ... Instead, it replaces the array\n# with a scalar.\n['1']", shells=['yash'], variant='OK')], line_number=336, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [compat_array] scalar write to arrays (line 336)
E           
E           stdout mismatch:
E             expected: "['2', '0', '0']"
E             actual:   "['1', '0', '0']"
E           
E           Expected stdout: "['2', '0', '0']"
E           Actual stdout:   "['1', '0', '0']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case ${SH##*/} in
E           (dash|ash) exit 1;; # dash/ash does not have arrays
E           (osh) shopt -s compat_array;;
E           (zsh) setopt KSH_ARRAYS;;
E           esac
E           
E           a=(1 0 0)
E           : $(( a++ ))
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-features.test.sh::[compat_array] scalar write to associative arrays[L358]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10910ffb0>
test_file = 'ble-features.test.sh'
test_case = TestCase(name='[compat_array] scalar write to associative arrays', script="case ${SH##*/} in\n(dash|ash|yash|mksh) exi... Assertion(type='stdout', value="['1', 'hello', 'world']", shells=['zsh'], variant='N-I')], line_number=358, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [compat_array] scalar write to associative arrays (line 358)
E           
E           stdout mismatch:
E             expected: "['2', 'hello', 'world']"
E             actual:   "['1', 'hello', 'world']"
E           
E           Expected stdout: "['2', 'hello', 'world']"
E           Actual stdout:   "['1', 'hello', 'world']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case ${SH##*/} in
E           (dash|ash|yash|mksh) exit 1;; # dash/ash/yash/mksh does not have associative arrays
E           (osh) shopt -s compat_array;;
E           (zsh) setopt KSH_ARRAYS;;
E           esac
E           
E           declare -A d=()
E           d['0']=1
E           d['foo']=hello
E           d['bar']=world
E           ((d++))
E           argv.py ${d['0']} ${d['foo']} ${d['bar']}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-unset.test.sh::[bash_unset] nested context by tempenv-eval[L7]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109134170>
test_file = 'ble-unset.test.sh'
test_case = TestCase(name='[bash_unset] nested context by tempenv-eval', script='f1() {\n  local v=local1\n  echo "[$1,local1] v: ...,local2] v: local2\n[global,tempenv1,local1] v: local2 (after)', shells=None, variant=None)], line_number=7, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] nested context by tempenv-eval (line 7)
E           
E           stdout mismatch:
E             expected: '# localvar-nest yes\n[global,tempenv1,local1] v: local1\n[global,tempenv1,local1,tempenv2,(eval)] v: tempenv2\n[global,tempenv1,local1,tempenv2,(eval),local2] v: local2\n[global,tempenv1,local1] v: local1 (after)'
E             actual:   '[global,tempenv1,local1] v: local1\n[global,tempenv1,local1,tempenv2,(eval)] v: tempenv2\n[global,tempenv1,local1,tempenv2,(eval),local2] v: local2\n[global,tempenv1,local1] v: local1 (after)'
E           
E           Expected stdout: '# localvar-nest yes\n[global,tempenv1,local1] v: local1\n[global,tempenv1,local1,tempenv2,(eval)] v: tempenv2\n[global,tempenv1,local1,tempenv2,(eval),local2] v: local2\n[global,tempenv1,local1] v: local1 (after)'
E           Actual stdout:   '[global,tempenv1,local1] v: local1\n[global,tempenv1,local1,tempenv2,(eval)] v: tempenv2\n[global,tempenv1,local1,tempenv2,(eval),local2] v: local2\n[global,tempenv1,local1] v: local1 (after)\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f1() {
E             local v=local1
E             echo "[$1,local1] v: ${v-(unset)}"
E             v=tempenv2 eval '
E               echo "[$1,local1,tempenv2,(eval)] v: ${v-(unset)}"
E               local v=local2
E               echo "[$1,local1,tempenv2,(eval),local2] v: ${v-(unset)}"
E             '
E             echo "[$1,local1] v: ${v-(unset)} (after)"
E           }
E           v=global
E           v=tempenv1 f1 global,tempenv1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-unset.test.sh::[bash_unset] local-unset / dynamic-unset for localvar on nested-context[L37]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109134230>
test_file = 'ble-unset.test.sh'
test_case = TestCase(name='[bash_unset] local-unset / dynamic-unset for localvar on nested-context', script='unlocal() { unset -v ...tempenv1,local1,tempenv2,(eval),local2,(unlocal)] v: tempenv1', shells=None, variant=None)], line_number=37, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] local-unset / dynamic-unset for localvar on nested-context (line 37)
E           
E           Execution error: Expected ')' after '(' in function definition at line 16, column 35
E           
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f2() {
E             local v=local1
E             v=tempenv2 eval '
E               local v=local2
E               (unset v  ; echo "[$1,local1,tempenv2,(eval),local2,(unset)] v: ${v-(unset)}")
E               (unlocal v; echo "[$1,local1,tempenv2,(eval),local2,(unlocal)] v: ${v-(unset)}")
E             '
E           }
E           v=global
E           v=tempenv1 f2 global,tempenv1
E           
E           
E           # Note that bash-4.3 to bash 5.0 behave differently
E           # [global,tempenv1,local1,tempenv2,(eval),local2,(unset)] v: local1
E           # [global,tempenv1,local1,tempenv2,(eval),local2,(unlocal)] v: local1
E           
E           # always-value-unset
E           [global,tempenv1,local1,tempenv2,(eval),local2,(unset)] v: (unset)
E           [global,tempenv1,local1,tempenv2,(eval),local2,(unlocal)] v: (unset)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-unset.test.sh::[bash_unset] dynamic-unset for nested localvars[L73]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091342f0>
test_file = 'ble-unset.test.sh'
test_case = TestCase(name='[bash_unset] dynamic-unset for nested localvars', script='unlocal() { unset -v "$1"; }\n\nf3() {\n  loc...ocal1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)', shells=None, variant=None)], line_number=73, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] dynamic-unset for nested localvars (line 73)
E           
E           stdout mismatch:
E             expected: '# cell-unset x localvar-tempenv-share x tempenv-in-localctx\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local3\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local2 (unlocal 1)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local1 (unlocal 2)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: global (unlocal 3)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)'
E             actual:   '[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 1)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 2)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 3)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 4)'
E           
E           Expected stdout: '# cell-unset x localvar-tempenv-share x tempenv-in-localctx\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local3\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local2 (unlocal 1)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local1 (unlocal 2)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: global (unlocal 3)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)'
E           Actual stdout:   '[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 1)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 2)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 3)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 4)\n'
E           Expected stderr: None
E           Actual stderr:   'bash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f3() {
E             local v=local1
E             v=tempenv2 eval '
E               local v=local2
E               v=tempenv3 eval "
E                 local v=local3
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)}\"
E                 unlocal v
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)} (unlocal 1)\"
E                 unlocal v
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)} (unlocal 2)\"
E                 unlocal v
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)} (unlocal 3)\"
E                 unlocal v
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)} (unlocal 4)\"
E               "
E             '
E           }
E           v=global
E           v=tempenv1 f3 global,tempenv1
E           
E           
E           # value-unset
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local3
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 1)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 2)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 3)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)
E           
E           
E           # cell-unset (remove all localvar/tempenv) x tempenv-value-unset
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local3
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: tempenv1 (unlocal 1)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 2)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 3)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-unset.test.sh::[bash_unset] dynamic-unset for nested tempenvs[L135]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091343b0>
test_file = 'ble-unset.test.sh'
test_case = TestCase(name='[bash_unset] dynamic-unset for nested tempenvs', script='unlocal() { unset -v "$1"; }\n\nf4_unlocal() {...\n[global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 4)', shells=None, variant=None)], line_number=135, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] dynamic-unset for nested tempenvs (line 135)
E           
E           stdout mismatch:
E             expected: '[global,tempenv1,tempenv2,tempenv3] v: tempenv3\n[global,tempenv1,tempenv2,tempenv3] v: tempenv2 (unlocal 1)\n[global,tempenv1,tempenv2,tempenv3] v: tempenv1 (unlocal 2)\n[global,tempenv1,tempenv2,tempenv3] v: global (unlocal 3)\n[global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 4)'
E             actual:   '[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unlocal 1)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unlocal 2)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unlocal 3)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unlocal 4)'
E           
E           Expected stdout: '[global,tempenv1,tempenv2,tempenv3] v: tempenv3\n[global,tempenv1,tempenv2,tempenv3] v: tempenv2 (unlocal 1)\n[global,tempenv1,tempenv2,tempenv3] v: tempenv1 (unlocal 2)\n[global,tempenv1,tempenv2,tempenv3] v: global (unlocal 3)\n[global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 4)'
E           Actual stdout:   '[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unlocal 1)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unlocal 2)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unlocal 3)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unlocal 4)\n'
E           Expected stderr: None
E           Actual stderr:   'bash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f4_unlocal() {
E             v=tempenv2 eval '
E               v=tempenv3 eval "
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)}\"
E                 unlocal v
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)} (unlocal 1)\"
E                 unlocal v
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)} (unlocal 2)\"
E                 unlocal v
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)} (unlocal 3)\"
E                 unlocal v
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)} (unlocal 4)\"
E               "
E             '
E           }
E           v=global
E           v=tempenv1 f4_unlocal global,tempenv1
E           
E           
E           
E           # cell-unset
E           [global,tempenv1,tempenv2,tempenv3] v: tempenv3
E           [global,tempenv1,tempenv2,tempenv3] v: tempenv1 (unlocal 1)
E           [global,tempenv1,tempenv2,tempenv3] v: global (unlocal 2)
E           [global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 3)
E           [global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 4)
E           
E           # remove all tempenv3
E           [global,tempenv1,tempenv2,tempenv3] v: tempenv3
E           [global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 1)
E           [global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 2)
E           [global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 3)
E           [global,tempenv1,tempenv2,tempenv3] v: (unset) (unlocal 4)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[ble-unset.test.sh::[bash_unset] local-unset for nested tempenvs[L191]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109134470>
test_file = 'ble-unset.test.sh'
test_case = TestCase(name='[bash_unset] local-unset for nested tempenvs', script='f4_unset() {\n  v=tempenv2 eval \'\n    v=tempen...global,tempenv1,tempenv2,tempenv3] v: (unset) (unset 4)', shells=['yash'], variant='BUG')], line_number=191, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [bash_unset] local-unset for nested tempenvs (line 191)
E           
E           stdout mismatch:
E             expected: '[global,tempenv1,tempenv2,tempenv3] v: tempenv3\n[global,tempenv1,tempenv2,tempenv3] v: tempenv2 (unset 1)\n[global,tempenv1,tempenv2,tempenv3] v: tempenv1 (unset 2)\n[global,tempenv1,tempenv2,tempenv3] v: global (unset 3)\n[global,tempenv1,tempenv2,tempenv3] v: (unset) (unset 4)'
E             actual:   '[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unset 1)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unset 2)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unset 3)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unset 4)'
E           
E           Expected stdout: '[global,tempenv1,tempenv2,tempenv3] v: tempenv3\n[global,tempenv1,tempenv2,tempenv3] v: tempenv2 (unset 1)\n[global,tempenv1,tempenv2,tempenv3] v: tempenv1 (unset 2)\n[global,tempenv1,tempenv2,tempenv3] v: global (unset 3)\n[global,tempenv1,tempenv2,tempenv3] v: (unset) (unset 4)'
E           Actual stdout:   '[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unset 1)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unset 2)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unset 3)\n[\\global,tempenv1,tempenv2,tempenv3] v: \\tempenv3 (unset 4)\n'
E           Expected stderr: None
E           Actual stderr:   'bash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\nbash: [global,tempenv1,tempenv2,tempenv3]: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f4_unset() {
E             v=tempenv2 eval '
E               v=tempenv3 eval "
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)}\"
E                 unset v
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)} (unset 1)\"
E                 unset v
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)} (unset 2)\"
E                 unset v
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)} (unset 3)\"
E                 unset v
E                 echo \"[\$1,tempenv2,tempenv3] v: \${v-(unset)} (unset 4)\"
E               "
E             '
E           }
E           v=global
E           v=tempenv1 f4_unset global,tempenv1
E           
E           
E           
E           # cell-unset
E           [global,tempenv1,tempenv2,tempenv3] v: tempenv3
E           [global,tempenv1,tempenv2,tempenv3] v: tempenv1 (unset 1)
E           [global,tempenv1,tempenv2,tempenv3] v: global (unset 2)
E           [global,tempenv1,tempenv2,tempenv3] v: (unset) (unset 3)
E           [global,tempenv1,tempenv2,tempenv3] v: (unset) (unset 4)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bool-parse.test.sh::test builtin: ( = ) is confusing: equality test or non-empty string test[L44]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091345f0>
test_file = 'bool-parse.test.sh'
test_case = TestCase(name='test builtin: ( = ) is confusing: equality test or non-empty string test', script="# here it's equality...None), Assertion(type='stdout', value='status=0\nstatus=1', shells=['zsh'], variant='BUG')], line_number=44, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test builtin: ( = ) is confusing: equality test or non-empty string test (line 44)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=0'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # here it's equality
E           test '(' = ')'
E           echo status=$?
E           
E           # here it's like -n =
E           test 0 -eq 0 -a '(' = ')'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bool-parse.test.sh::test builtin: ( == ) is confusing: equality test or non-empty string test[L64]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091346b0>
test_file = 'bool-parse.test.sh'
test_case = TestCase(name='test builtin: ( == ) is confusing: equality test or non-empty string test', script="# here it's equalit...['zsh'], variant='BUG'), Assertion(type='stdout', value='', shells=['zsh'], variant='BUG')], line_number=64, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test builtin: ( == ) is confusing: equality test or non-empty string test (line 64)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=0'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # here it's equality
E           test '(' == ')'
E           echo status=$?
E           
E           # here it's like -n ==
E           test 0 -eq 0 -a '(' == ')'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bool-parse.test.sh::Not allowed: [[ ) ]] and [[ ( ]][L110]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109134830>
test_file = 'bool-parse.test.sh'
test_case = TestCase(name='Not allowed: [[ ) ]] and [[ ( ]]', script='[[ ) ]]\necho status=$?\n[[ ( ]]\necho status=$?', assertion..., variant='OK'), Assertion(type='stdout', value='status=1', shells=['zsh'], variant='OK')], line_number=110, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Not allowed: [[ ) ]] and [[ ( ]] (line 110)
E           
E           Execution error: Expected conditional expression at line 1, column 4
E           
E           
E           Script:
E           ---
E           [[ ) ]]
E           echo status=$?
E           [[ ( ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::partial leading expansion[L13]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109134cb0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='partial leading expansion', script='echo }_{a,b}', assertions=[Assertion(type='stdout', value='}_a }_b'...ne, variant=None), Assertion(type='stdout', value='}_{a,b}', shells=['osh'], variant='OK')], line_number=13, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: partial leading expansion (line 13)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 6
E           
E           
E           Script:
E           ---
E           echo }_{a,b}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::double expansion with single and double quotes[L44]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109135130>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='double expansion with single and double quotes', script='echo {\'a\',b}_{c,"d"}', assertions=[Assertion(type='stdout', value='a_c a_d b_c b_d', shells=None, variant=None)], line_number=44, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: double expansion with single and double quotes (line 44)
E           
E           stdout mismatch:
E             expected: 'a_c a_d b_c b_d'
E             actual:   '{a,b}_{c,d}'
E           
E           Expected stdout: 'a_c a_d b_c b_d'
E           Actual stdout:   '{a,b}_{c,d}\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo {'a',b}_{c,"d"}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::expansion with mixed quotes[L48]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091351f0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='expansion with mixed quotes', script='echo -{\\X"b",\'cd\'}-', assertions=[Assertion(type='stdout', value='-Xb- -cd-', shells=None, variant=None)], line_number=48, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: expansion with mixed quotes (line 48)
E           
E           stdout mismatch:
E             expected: '-Xb- -cd-'
E             actual:   '-{Xb,cd}-'
E           
E           Expected stdout: '-Xb- -cd-'
E           Actual stdout:   '-{Xb,cd}-\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo -{\X"b",'cd'}-
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::double expansion with simple var -- bash bug[L57]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109135370>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='double expansion with simple var -- bash bug', script='# bash is inconsistent with the above\na=A\necho..., variant=None), Assertion(type='stdout', value='b_c b_d', shells=['bash'], variant='BUG')], line_number=57, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: double expansion with simple var -- bash bug (line 57)
E           
E           stdout mismatch:
E             expected: 'b_c b_d'
E             actual:   'A_c b_c A_d b_d'
E           
E           Expected stdout: 'b_c b_d'
E           Actual stdout:   'A_c b_c A_d b_d\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash is inconsistent with the above
E           a=A
E           echo {$a,b}_{c,d}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::double expansion with braced variable[L64]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109135430>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='double expansion with braced variable', script='# This fixes it\na=A\necho {${a},b}_{c,d}', assertions=[Assertion(type='stdout', value='A_c A_d b_c b_d', shells=None, variant=None)], line_number=64, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: double expansion with braced variable (line 64)
E           
E           stdout mismatch:
E             expected: 'A_c A_d b_c b_d'
E             actual:   'A_c b_c A_d b_d'
E           
E           Expected stdout: 'A_c A_d b_c b_d'
E           Actual stdout:   'A_c b_c A_d b_d\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This fixes it
E           a=A
E           echo {${a},b}_{c,d}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::double expansion with literal and simple var[L70]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091354f0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='double expansion with literal and simple var', script='a=A\necho {_$a,b}_{c,d}', assertions=[Assertion(...riant=None), Assertion(type='stdout', value='_ _ b_c b_d', shells=['bash'], variant='BUG')], line_number=70, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: double expansion with literal and simple var (line 70)
E           
E           stdout mismatch:
E             expected: '_ _ b_c b_d'
E             actual:   '_A_c b_c _A_d b_d'
E           
E           Expected stdout: '_ _ b_c b_d'
E           Actual stdout:   '_A_c b_c _A_d b_d\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=A
E           echo {_$a,b}_{c,d}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::Empty alternative with empty string suffix[L115]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109135af0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='Empty alternative with empty string suffix', script="# zsh and mksh don't do word elision, probably bec... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=115, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty alternative with empty string suffix (line 115)
E           
E           stdout mismatch:
E             expected: "['X', '', 'Y', '']"
E             actual:   "['X', 'Y']"
E           
E           Expected stdout: "['X', '', 'Y', '']"
E           Actual stdout:   "['X', 'Y']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # zsh and mksh don't do word elision, probably because they do brace expansion
E           # AFTER variable substitution.
E           argv.py {X,,Y,}''
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::expansion on RHS of assignment[L134]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109135df0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='expansion on RHS of assignment', script="# I think bash's behavior is more consistent.  No splitting ei...one, variant=None), Assertion(type='stdout', value='X Y', shells=['mksh'], variant='BUG')], line_number=134, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: expansion on RHS of assignment (line 134)
E           
E           stdout mismatch:
E             expected: '{X,Y}'
E             actual:   'X Y'
E           
E           Expected stdout: '{X,Y}'
E           Actual stdout:   'X Y\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # I think bash's behavior is more consistent.  No splitting either.
E           v={X,Y}
E           echo $v
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::no expansion with RHS assignment[L141]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109135eb0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='no expansion with RHS assignment', script='{v,x}=X', assertions=[Assertion(type='status', value=127, sh...ells=None, variant=None), Assertion(type='status', value=1, shells=['zsh'], variant='OK')], line_number=141, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: no expansion with RHS assignment (line 141)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: {v,x}=X: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           {v,x}=X
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::Tilde expansion with brace expansion[L157]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109136030>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='Tilde expansion with brace expansion', script='# The brace expansion happens FIRST.  After that, the se...bar ~/bar\n-- foo~/bar\n-- ~/bar\n== foo~/bar\n== ~/bar', shells=['mksh'], variant='BUG')], line_number=157, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Tilde expansion with brace expansion (line 157)
E           
E           stdout mismatch:
E             expected: 'foo~/bar /home/bob/bar\n-- foo~/bar\n-- /home/bob/bar\n== foo~/bar\n== /home/bob/bar'
E             actual:   'foo~/bar ~/bar\n-- foo~/bar\n-- ~/bar\n== {foo~,~}/bar'
E           
E           Expected stdout: 'foo~/bar /home/bob/bar\n-- foo~/bar\n-- /home/bob/bar\n== foo~/bar\n== /home/bob/bar'
E           Actual stdout:   'foo~/bar ~/bar\n-- foo~/bar\n-- ~/bar\n== {foo~,~}/bar\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # The brace expansion happens FIRST.  After that, the second token has tilde
E           # FIRST, so it gets expanded.  The first token has an unexpanded tilde, because
E           # it's not in the leading position.
E           
E           HOME=/home/bob
E           
E           # Command
E           
E           echo {foo~,~}/bar
E           
E           # Loop
E           
E           for x in {foo~,~}/bar; do
E             echo -- $x
E           done
E           
E           # Array
E           
E           a=({foo~,~}/bar)
E           
E           for y in "${a[@]}"; do
E             echo "== $y"
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::Two kinds of tilde expansion[L199]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091360f0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='Two kinds of tilde expansion', script='HOME=/home/bob\n\n# Command\necho ~{/src,root}\n\n# Loop\n\nfor ...ue='~/src ~root\n-- ~/src\n-- ~root\n== ~/src\n== ~root', shells=['mksh'], variant='BUG')], line_number=199, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Two kinds of tilde expansion (line 199)
E           
E           stdout mismatch:
E             expected: '/home/bob/src /root\n-- /home/bob/src\n-- /root\n== /home/bob/src\n== /root'
E             actual:   '/home/bob/src /home/bobroot\n-- /home/bob/src\n-- /home/bobroot\n== /home/bob{/src,root}'
E           
E           Expected stdout: '/home/bob/src /root\n-- /home/bob/src\n-- /root\n== /home/bob/src\n== /root'
E           Actual stdout:   '/home/bob/src /home/bobroot\n-- /home/bob/src\n-- /home/bobroot\n== /home/bob{/src,root}\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           HOME=/home/bob
E           
E           # Command
E           echo ~{/src,root}
E           
E           # Loop
E           
E           for x in ~{/src,root}; do
E             echo -- $x
E           done
E           
E           # Array
E           
E           a=(~{/src,root})
E           
E           for y in "${a[@]}"; do
E             echo "== $y"
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::Mixed case char expansion is invalid[L370]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109136ab0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='Mixed case char expansion is invalid', script='case $SH in *zsh) echo BUG; exit ;; esac\necho -{z..A}-\...bash'], variant='BUG'), Assertion(type='status', value=1, shells=['bash'], variant='BUG')], line_number=370, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Mixed case char expansion is invalid (line 370)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '-z- -y- -x- -w- -v- -u- -t- -s- -r- -q- -p- -o- -n- -m- -l- -k- -j- -i- -h- -g- -f- -e- -d- -c- -b- -a- -`- -_- -^- -]- -\\- -[- -Z- -Y- -X- -W- -V- -U- -T- -S- -R- -Q- -P- -O- -N- -M- -L- -K- -J- -I- -H- -G- -F- -E- -D- -C- -B- -A-\n-z- -x- -v- -t- -r- -p- -n- -l- -j- -h- -f- -d- -b- -`- -^- -\\- -Z- -X- -V- -T- -R- -P- -N- -L- -J- -H- -F- -D- -B-'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '-z- -y- -x- -w- -v- -u- -t- -s- -r- -q- -p- -o- -n- -m- -l- -k- -j- -i- -h- -g- -f- -e- -d- -c- -b- -a- -`- -_- -^- -]- -\\- -[- -Z- -Y- -X- -W- -V- -U- -T- -S- -R- -Q- -P- -O- -N- -M- -L- -K- -J- -I- -H- -G- -F- -E- -D- -C- -B- -A-\n-z- -x- -v- -t- -r- -p- -n- -l- -j- -h- -f- -d- -b- -`- -^- -\\- -Z- -X- -V- -T- -R- -P- -N- -L- -J- -H- -F- -D- -B-\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in *zsh) echo BUG; exit ;; esac
E           echo -{z..A}-
E           echo -{z..A..2}-
E           # This is exposed a weird bash bug!!!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::comma and invalid range (adjacent and nested)[L444]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091370b0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='comma and invalid range (adjacent and nested)', script="echo -{a,b}{1...3}-\necho -{a,{1...3}}-\necho {...e='stdout', value='-{a,b}{1...3}-\n-{a,{1...3}}-\n{a,b}{}', shells=['osh'], variant='OK')], line_number=444, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: comma and invalid range (adjacent and nested) (line 444)
E           
E           Execution error: maximum recursion depth exceeded
E           
E           
E           Script:
E           ---
E           echo -{a,b}{1...3}-
E           echo -{a,{1...3}}-
E           echo {a,b}{}
E           # osh doesn't expand ANYTHING on invalid syntax.  That's OK because of the test
E           # case below.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::OSH provides an alternative to invalid syntax[L461]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109137170>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='OSH provides an alternative to invalid syntax', script='echo -{a,b}\\{1...3\\}-\necho -{a,\\{1...3\\}}-...tdout', value='-a{1...3}- -b{1...3}-\n-a- -{1...3}-\na{} b{}', shells=None, variant=None)], line_number=461, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: OSH provides an alternative to invalid syntax (line 461)
E           
E           stdout mismatch:
E             expected: '-a{1...3}- -b{1...3}-\n-a- -{1...3}-\na{} b{}'
E             actual:   '-a{1...3}- -b{1...3}-\n-{a,{1...3}}-\na{} b{}'
E           
E           Expected stdout: '-a{1...3}- -b{1...3}-\n-a- -{1...3}-\na{} b{}'
E           Actual stdout:   '-a{1...3}- -b{1...3}-\n-{a,{1...3}}-\na{} b{}\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo -{a,b}\{1...3\}-
E           echo -{a,\{1...3\}}-
E           echo {a,b}\{\}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::Invalid brace expansions don't expand[L479]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091372f0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name="Invalid brace expansions don't expand", script='echo {1.3}\necho {1...3}\necho {1__3}', assertions=[Assertion(type='stdout', value='{1.3}\n{1...3}\n{1__3}', shells=None, variant=None)], line_number=479, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Invalid brace expansions don't expand (line 479)
E           
E           Execution error: maximum recursion depth exceeded
E           
E           
E           Script:
E           ---
E           echo {1.3}
E           echo {1...3}
E           echo {1__3}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[brace-expansion.test.sh::Invalid brace expansions mixing characters and numbers[L489]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091373b0>
test_file = 'brace-expansion.test.sh'
test_case = TestCase(name='Invalid brace expansions mixing characters and numbers', script="# zsh does something crazy like : ; < ...None, variant=None), Assertion(type='stdout', value='BUG', shells=['zsh'], variant='BUG')], line_number=489, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Invalid brace expansions mixing characters and numbers (line 489)
E           
E           stdout mismatch:
E             expected: '{1..a}\n{z..3}'
E             actual:   '1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \\ ] ^ _ ` a\nz y x w v u t s r q p o n m l k j i h g f e d c b a ` _ ^ ] \\ [ Z Y X W V U T S R Q P O N M L K J I H G F E D C B A @ ? > = < ; : 9 8 7 6 5 4 3'
E           
E           Expected stdout: '{1..a}\n{z..3}'
E           Actual stdout:   '1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \\ ] ^ _ ` a\nz y x w v u t s r q p o n m l k j i h g f e d c b a ` _ ^ ] \\ [ Z Y X W V U T S R Q P O N M L K J I H G F E D C B A @ ? > = < ; : 9 8 7 6 5 4 3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # zsh does something crazy like : ; < = > that I'm not writing
E           case $SH in *zsh) echo BUG; exit ;; esac
E           echo {1..a}
E           echo {z..3}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::assign readonly -- one line[L25]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091376b0>
test_file = 'bugs.test.sh'
test_case = TestCase(name='assign readonly -- one line', script='readonly x=1; x=2; echo hi', assertions=[Assertion(type='status',...ksh', 'ash'], variant='OK'), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=25, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assign readonly -- one line (line 25)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'hi'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'hi\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           readonly x=1; x=2; echo hi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::assign readonly -- multiple lines -- set -o posix[L45]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109137830>
test_file = 'bugs.test.sh'
test_case = TestCase(name='assign readonly -- multiple lines -- set -o posix', script='set -o posix\nreadonly x=1\nx=2\necho hi', ...ksh', 'ash'], variant='OK'), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=45, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assign readonly -- multiple lines -- set -o posix (line 45)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'hi'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'hi\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: posix: invalid option name\n'
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o posix
E           readonly x=1
E           x=2
E           echo hi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::First word like foo$x() and foo$[1+2] (regression)[L74]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109137a70>
test_file = 'bugs.test.sh'
test_case = TestCase(name='First word like foo$x() and foo$[1+2] (regression)', script="# Problem: $x() func call broke this error...ksh', 'zsh'], variant='OK'), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=74, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: First word like foo$x() and foo$[1+2] (regression) (line 74)
E           
E           Execution error: Expected ')' after '(' in function definition at line 2, column 14
E           
E           
E           Script:
E           ---
E           # Problem: $x() func call broke this error message
E           foo$identity('z')
E           
E           foo$[1+2]
E           
E           echo DONE
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[bugs.test.sh::Function names[L88]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109137b30>
test_file = 'bugs.test.sh'
test_case = TestCase(name='Function names', script='foo$x() {\n  echo hi\n}\n\nfoo $x() {\n  echo hi\n}\n\n# Note: zsh should retu...lls=['zsh'], variant='BUG'), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=88, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Function names (line 88)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'hi'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'hi\n'
E           Expected stderr: None
E           Actual stderr:   'bash: foo: command not found\n'
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           foo$x() {
E             echo hi
E           }
E           
E           foo $x() {
E             echo hi
E           }
E           
E           # Note: zsh should return 1 or 2
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[bugs.test.sh::file with NUL byte[L105]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109137bf0>
test_file = 'bugs.test.sh'
test_case = TestCase(name='file with NUL byte', script="echo -e 'echo one \\0 echo two' > tmp.sh\n$SH tmp.sh", assertions=[Asserti..., Assertion(type='stdout-json', value='one \x00echo two\n', shells=['zsh'], variant='OK')], line_number=105, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: file with NUL byte (line 105)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'one \x00 echo two'
E           status mismatch: expected 126, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'one \x00 echo two\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo -e 'echo one \0 echo two' > tmp.sh
E           $SH tmp.sh
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::'echo' and printf fail on writing to full disk[L143]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109137d70>
test_file = 'bugs.test.sh'
test_case = TestCase(name="'echo' and printf fail on writing to full disk", script="# Inspired by https://blog.sunfishcode.online/...sertions=[Assertion(type='stdout', value='status=1\nstatus=1', shells=None, variant=None)], line_number=143, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 'echo' and printf fail on writing to full disk (line 143)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Inspired by https://blog.sunfishcode.online/bugs-in-hello-world/
E           
E           echo hi > /dev/full
E           echo status=$?
E           
E           printf '%s\n' hi > /dev/full
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::other builtins fail on writing to full disk[L158]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109137e30>
test_file = 'bugs.test.sh'
test_case = TestCase(name='other builtins fail on writing to full disk', script='type echo > /dev/full\necho status=$?\n\n# other ...sertion(type='stdout', value='status=0\nstatus=0', shells=['mksh', 'zsh'], variant='BUG')], line_number=158, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: other builtins fail on writing to full disk (line 158)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: ulimit: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type echo > /dev/full
E           echo status=$?
E           
E           # other random builtin
E           ulimit -a > /dev/full
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::for loop (issue #1446)[L191]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109137fb0>
test_file = 'bugs.test.sh'
test_case = TestCase(name='for loop (issue #1446)', script='case $SH in dash|mksh|ash) exit ;; esac\n\nfor (( n=0; n<(3-(1)); n++ ...=None), Assertion(type='stdout', value='', shells=['dash', 'mksh', 'ash'], variant='N-I')], line_number=191, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: for loop (issue #1446) (line 191)
E           
E           Execution error: Expected 'do' in for loop at line 3, column 24
E           
E           
E           Script:
E           ---
E           case $SH in dash|mksh|ash) exit ;; esac
E           
E           for (( n=0; n<(3-(1)); n++ )) ; do echo $n; done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::for loop 2 (issue #1446)[L205]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091540b0>
test_file = 'bugs.test.sh'
test_case = TestCase(name='for loop 2 (issue #1446)', script='case $SH in dash|mksh|ash) exit ;; esac\n\n\nfor (( n=0; n<(3- (1));...=None), Assertion(type='stdout', value='', shells=['dash', 'mksh', 'ash'], variant='N-I')], line_number=205, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: for loop 2 (issue #1446) (line 205)
E           
E           Execution error: Expected 'do' in for loop at line 4, column 25
E           
E           
E           Script:
E           ---
E           case $SH in dash|mksh|ash) exit ;; esac
E           
E           
E           for (( n=0; n<(3- (1)); n++ )) ; do echo $n; done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::autoconf word split (#1449)[L218]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154170>
test_file = 'bugs.test.sh'
test_case = TestCase(name='autoconf word split (#1449)', script='mysed() {\n  for line in "$@"; do\n    echo "[$line]"\n  done\n}\...--- backticks\n[-n]\n[my sed command]\n[f1]\n[f2]\nNOT SPLIT', shells=None, variant=None)], line_number=218, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: autoconf word split (#1449) (line 218)
E           
E           stdout mismatch:
E             expected: '--- $()\n[-n]\n[my sed command]\n[f1]\n[f2]\n--- backticks\n[-n]\n[my sed command]\n[f1]\n[f2]\nNOT SPLIT'
E             actual:   '--- $()\n[-n]\n[\\my sed command]\n[f1]\n[f2]\n--- backticks\n[-n]\n[my sed command]\n[f1]\n[f2]\nNOT SPLIT'
E           
E           Expected stdout: '--- $()\n[-n]\n[my sed command]\n[f1]\n[f2]\n--- backticks\n[-n]\n[my sed command]\n[f1]\n[f2]\nNOT SPLIT'
E           Actual stdout:   '--- $()\n[-n]\n[\\my sed command]\n[f1]\n[f2]\n--- backticks\n[-n]\n[my sed command]\n[f1]\n[f2]\nNOT SPLIT\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mysed() {
E             for line in "$@"; do
E               echo "[$line]"
E             done
E           }
E           
E           sedinputs="f1 f2"
E           sedscript='my sed command'
E           
E           # Parsed and evaluated correctly: with word_part.EscapedLiteral \"
E           
E           x=$(eval "mysed -n \"\$sedscript\" $sedinputs")
E           echo '--- $()'
E           echo "$x"
E           
E           # With backticks, the \" gets lost somehow
E           
E           x=`eval "mysed -n \"\$sedscript\" $sedinputs"`
E           echo '--- backticks'
E           echo "$x"
E           
E           
E           # Test it in a case statement
E           
E           case `eval "mysed -n \"\$sedscript\" $sedinputs"` in 
E             (*'[my sed command]'*)
E               echo 'NOT SPLIT'
E               ;;
E           esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::autoconf arithmetic - relaxed eval_unsafe_arith (#1450)[L264]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154230>
test_file = 'bugs.test.sh'
test_case = TestCase(name='autoconf arithmetic - relaxed eval_unsafe_arith (#1450)', script='as_fn_arith ()\n{\n    as_val=$(( $* ...echo $as_val', assertions=[Assertion(type='stdout', value='2', shells=None, variant=None)], line_number=264, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: autoconf arithmetic - relaxed eval_unsafe_arith (#1450) (line 264)
E           
E           stdout mismatch:
E             expected: '2'
E             actual:   '0'
E           
E           Expected stdout: '2'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           as_fn_arith ()
E           {
E               as_val=$(( $* ))
E           }
E           as_fn_arith 1 + 1
E           echo $as_val
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::command execution $(echo 42 | tee PWNED) not allowed[L277]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091542f0>
test_file = 'bugs.test.sh'
test_case = TestCase(name='command execution $(echo 42 | tee PWNED) not allowed', script="rm -f PWNED\n\nx='a[$(echo 42 | tee PWNE...), Assertion(type='stdout', value='1\n42', shells=['bash', 'mksh', 'zsh'], variant='BUG')], line_number=277, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command execution $(echo 42 | tee PWNED) not allowed (line 277)
E           
E           stdout mismatch:
E             expected: '1\n42'
E             actual:   '0\nNOPE'
E           
E           Expected stdout: '1\n42'
E           Actual stdout:   '0\nNOPE\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f PWNED
E           
E           x='a[$(echo 42 | tee PWNED)]=1'
E           echo $(( x ))
E           
E           if test -f PWNED; then
E             cat PWNED
E           else
E             echo NOPE
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::process sub <(echo 42 | tee PWNED) not allowed[L300]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091543b0>
test_file = 'bugs.test.sh'
test_case = TestCase(name='process sub <(echo 42 | tee PWNED) not allowed', script="rm -f PWNED\n\nx='a[<(echo 42 | tee PWNED)]=1'...], variant='BUG'), Assertion(type='stdout', value='NOPE', shells=['bash'], variant='BUG')], line_number=300, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: process sub <(echo 42 | tee PWNED) not allowed (line 300)
E           
E           stdout mismatch:
E             expected: 'NOPE'
E             actual:   '0\nNOPE'
E           
E           Expected stdout: 'NOPE'
E           Actual stdout:   '0\nNOPE\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f PWNED
E           
E           x='a[<(echo 42 | tee PWNED)]=1'
E           echo $(( x ))
E           
E           if test -f PWNED; then
E             cat PWNED
E           else
E             echo NOPE
E           fi
E           
E           
E           
E           # bash keeps going
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::unset doesn't allow command execution[L324]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154470>
test_file = 'bugs.test.sh'
test_case = TestCase(name="unset doesn't allow command execution", script="typeset -a a  # for mksh\na=(42)\necho len=${#a[@]}\n\n..., Assertion(type='stdout', value='len=1\nlen=1\nPWNED\n0', shells=['zsh'], variant='BUG')], line_number=324, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unset doesn't allow command execution (line 324)
E           
E           stdout mismatch:
E             expected: 'len=1\nlen=0\nPWNED\n0'
E             actual:   'len=1\nlen=1\nNOPE'
E           
E           Expected stdout: 'len=1\nlen=0\nPWNED\n0'
E           Actual stdout:   'len=1\nlen=1\nNOPE\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -a a  # for mksh
E           a=(42)
E           echo len=${#a[@]}
E           
E           unset -v 'a[$(echo 0 | tee PWNED)]'
E           echo len=${#a[@]}
E           
E           if test -f PWNED; then
E             echo PWNED
E             cat PWNED
E           else
E             echo NOPE
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[bugs.test.sh::(( status bug[L376]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091545f0>
test_file = 'bugs.test.sh'
test_case = TestCase(name='(( status bug', script='case $SH in dash|ash) exit ;; esac\n\n# from Koiche on Zulip\n\n(( 1 << 32 ))\n... variant=None), Assertion(type='stdout', value='', shells=['dash', 'ash'], variant='N-I')], line_number=376, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: (( status bug (line 376)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 6, column 1
E           
E           
E           Script:
E           ---
E           case $SH in dash|ash) exit ;; esac
E           
E           # from Koiche on Zulip
E           
E           (( 1 << 32 ))
E           echo status=$?
E           
E           (( 1 << 32 )) && echo yes
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::autotools as_fn_arith bug in configure[L395]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091546b0>
test_file = 'bugs.test.sh'
test_case = TestCase(name='autotools as_fn_arith bug in configure', script="# Causes 'grep -e' check to infinite loop.\n# Reduced ...s_val", assertions=[Assertion(type='stdout', value='as_val=1', shells=None, variant=None)], line_number=395, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: autotools as_fn_arith bug in configure (line 395)
E           
E           stdout mismatch:
E             expected: 'as_val=1'
E             actual:   'as_val=0'
E           
E           Expected stdout: 'as_val=1'
E           Actual stdout:   'as_val=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Causes 'grep -e' check to infinite loop.
E           # Reduced from a configure script.
E           
E           as_fn_arith() {
E             as_val=$(( $* ))
E           }
E           
E           as_fn_arith 0 + 1
E           echo as_val=$as_val
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[bugs.test.sh::Crash in {1..10} - issue #2296[L433]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154830>
test_file = 'bugs.test.sh'
test_case = TestCase(name='Crash in {1..10} - issue #2296', script='{1..10}', assertions=[Assertion(type='status', value=127, shells=None, variant=None), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=433, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Crash in {1..10} - issue #2296 (line 433)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: {1..10}: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           {1..10}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bash.test.sh::bad help topic[L18]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154a70>
test_file = 'builtin-bash.test.sh'
test_case = TestCase(name='bad help topic', script='help ZZZ 2>$TMP/err.txt\necho "help=$?"\ncat $TMP/err.txt | grep -i \'no help ...', assertions=[Assertion(type='stdout', value='help=1\ngrep=0', shells=None, variant=None)], line_number=18, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bad help topic (line 18)
E           
E           stdout mismatch:
E             expected: 'help=1\ngrep=0'
E             actual:   'help=127\ngrep=0'
E           
E           Expected stdout: 'help=1\ngrep=0'
E           Actual stdout:   'help=127\ngrep=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           help ZZZ 2>$TMP/err.txt
E           echo "help=$?"
E           cat $TMP/err.txt | grep -i 'no help topics' >/dev/null
E           echo "grep=$?"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[builtin-bash.test.sh::mapfile[L28]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154b30>
test_file = 'builtin-bash.test.sh'
test_case = TestCase(name='mapfile', script='type mapfile >/dev/null 2>&1 || exit 0\nprintf \'%s\\n\' {1..5..2} | {\n  mapfile\n  ..., Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=28, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mapfile (line 28)
E           
E           stdout mismatch:
E             expected: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E             actual:   'n=3\n[1]\n[3]\n[5]'
E           
E           Expected stdout: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E           Actual stdout:   'n=3\n[1]\n[3]\n[5]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type mapfile >/dev/null 2>&1 || exit 0
E           printf '%s\n' {1..5..2} | {
E             mapfile
E             echo "n=${#MAPFILE[@]}"
E             printf '[%s]\n' "${MAPFILE[@]}"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bash.test.sh::readarray (synonym for mapfile)[L47]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154bf0>
test_file = 'builtin-bash.test.sh'
test_case = TestCase(name='readarray (synonym for mapfile)', script='type readarray >/dev/null 2>&1 || exit 0\nprintf \'%s\\n\' {1..., Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=47, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: readarray (synonym for mapfile) (line 47)
E           
E           stdout mismatch:
E             expected: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E             actual:   'n=3\n[1]\n[3]\n[5]'
E           
E           Expected stdout: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E           Actual stdout:   'n=3\n[1]\n[3]\n[5]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type readarray >/dev/null 2>&1 || exit 0
E           printf '%s\n' {1..5..2} | {
E             readarray
E             echo "n=${#MAPFILE[@]}"
E             printf '[%s]\n' "${MAPFILE[@]}"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bash.test.sh::mapfile (array name): arr[L66]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154cb0>
test_file = 'builtin-bash.test.sh'
test_case = TestCase(name='mapfile (array name): arr', script='type mapfile >/dev/null 2>&1 || exit 0\nprintf \'%s\\n\' {1..5..2} ..., Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=66, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mapfile (array name): arr (line 66)
E           
E           stdout mismatch:
E             expected: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E             actual:   'n=3\n[1]\n[3]\n[5]'
E           
E           Expected stdout: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E           Actual stdout:   'n=3\n[1]\n[3]\n[5]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type mapfile >/dev/null 2>&1 || exit 0
E           printf '%s\n' {1..5..2} | {
E             mapfile arr
E             echo "n=${#arr[@]}"
E             printf '[%s]\n' "${arr[@]}"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bash.test.sh::mapfile (delimiter): -d delim[L85]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154d70>
test_file = 'builtin-bash.test.sh'
test_case = TestCase(name='mapfile (delimiter): -d delim', script='# Note: Bash-4.4+\ntype mapfile >/dev/null 2>&1 || exit 0\nprin..., Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=85, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mapfile (delimiter): -d delim (line 85)
E           
E           stdout mismatch:
E             expected: 'n=3\n[1:]\n[3:]\n[5:]'
E             actual:   'n=4\n[1]\n[3]\n[5]\n[]'
E           
E           Expected stdout: 'n=3\n[1:]\n[3:]\n[5:]'
E           Actual stdout:   'n=4\n[1]\n[3]\n[5]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Note: Bash-4.4+
E           type mapfile >/dev/null 2>&1 || exit 0
E           printf '%s:' {1..5..2} | {
E             mapfile -d : arr
E             echo "n=${#arr[@]}"
E             printf '[%s]\n' "${arr[@]}"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bash.test.sh::mapfile (delimiter): -d '' (null-separated)[L102]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154e30>
test_file = 'builtin-bash.test.sh'
test_case = TestCase(name="mapfile (delimiter): -d '' (null-separated)", script='# Note: Bash-4.4+\ntype mapfile >/dev/null 2>&1 |... Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=102, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mapfile (delimiter): -d '' (null-separated) (line 102)
E           
E           stdout mismatch:
E             expected: 'n=3\n[1]\n[3]\n[5]'
E             actual:   'n=4\n[1]\n[3]\n[5]\n[]'
E           
E           Expected stdout: 'n=3\n[1]\n[3]\n[5]'
E           Actual stdout:   'n=4\n[1]\n[3]\n[5]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Note: Bash-4.4+
E           type mapfile >/dev/null 2>&1 || exit 0
E           printf '%s\0' {1..5..2} | {
E             mapfile -d '' arr
E             echo "n=${#arr[@]}"
E             printf '[%s]\n' "${arr[@]}"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bash.test.sh::mapfile -t doesn't remove \\r[L135]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109154fb0>
test_file = 'builtin-bash.test.sh'
test_case = TestCase(name="mapfile -t doesn't remove \\r", script='type mapfile >/dev/null 2>&1 || exit 0\nprintf \'%s\\r\\n\' {1.... Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=135, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mapfile -t doesn't remove \r (line 135)
E           
E           stdout mismatch:
E             expected: "['1\\r', '3\\r', '5\\r']"
E             actual:   "['1\r', '3\r', '5\r']"
E           
E           Expected stdout: "['1\\r', '3\\r', '5\\r']"
E           Actual stdout:   "['1\r', '3\r', '5\r']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type mapfile >/dev/null 2>&1 || exit 0
E           printf '%s\r\n' {1..5..2} | {
E             mapfile -t arr
E             argv.py "${arr[@]}"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bash.test.sh::mapfile (store position): -O start[L170]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109155130>
test_file = 'builtin-bash.test.sh'
test_case = TestCase(name='mapfile (store position): -O start', script='type mapfile >/dev/null 2>&1 || exit 0\nprintf \'%s\\n\' a... Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=170, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mapfile (store position): -O start (line 170)
E           
E           stdout mismatch:
E             expected: 'n=5\n[x]\n[y]\n[a0]\n[a1]\n[a2]'
E             actual:   'n=0\n[]'
E           
E           Expected stdout: 'n=5\n[x]\n[y]\n[a0]\n[a1]\n[a2]'
E           Actual stdout:   'n=0\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type mapfile >/dev/null 2>&1 || exit 0
E           printf '%s\n' a{0..2} | {
E             arr=(x y z)
E             mapfile -O 2 -t arr
E             echo "n=${#arr[@]}"
E             printf '[%s]\n' "${arr[@]}"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::four args[L82]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091557f0>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='four args', script='[ ! foo = foo ]\necho status=$?\n[ \\( -z foo \\) ]\necho status=$?', assertions=[Assertion(type='stdout', value='status=1\nstatus=1', shells=None, variant=None)], line_number=82, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: four args (line 82)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   "bash: [: missing `]'\nbash: foo: command not found\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           [ ! foo = foo ]
E           echo status=$?
E           [ \( -z foo \) ]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::( ) ! -a -o with system version of [[L144]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109155d30>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='( ) ! -a -o with system version of [', script="command [ --version\ncommand [ -z '' -a '(' ! -z x ')' ]...echo true", assertions=[Assertion(type='stdout', value='true', shells=None, variant=None)], line_number=144, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ( ) ! -a -o with system version of [ (line 144)
E           
E           Execution error: Expected ')' to close subshell at line 1, column 13
E           
E           
E           Script:
E           ---
E           command [ --version
E           command [ -z '' -a '(' ! -z x ')' ] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::-x[L244]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109156570>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='-x', script="rm -f $TMP/x\necho 'echo hi' > $TMP/x\ntest -x $TMP/x || echo 'no'\nchmod +x $TMP/x\ntest ...'", assertions=[Assertion(type='stdout', value='no\nyes\nbad', shells=None, variant=None)], line_number=244, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -x (line 244)
E           
E           stdout mismatch:
E             expected: 'no\nyes\nbad'
E             actual:   'yes\nbad'
E           
E           Expected stdout: 'no\nyes\nbad'
E           Actual stdout:   'yes\nbad\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f $TMP/x
E           echo 'echo hi' > $TMP/x
E           test -x $TMP/x || echo 'no'
E           chmod +x $TMP/x
E           test -x $TMP/x && echo 'yes'
E           test -x $TMP/__nonexistent__ || echo 'bad'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::-r[L257]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109156630>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='-r', script="echo '1' > $TMP/testr_yes\necho '2' > $TMP/testr_no\nchmod -r $TMP/testr_no  # remove read...o 'no'", assertions=[Assertion(type='stdout', value='yes\nno', shells=None, variant=None)], line_number=257, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -r (line 257)
E           
E           stdout mismatch:
E             expected: 'yes\nno'
E             actual:   'yes'
E           
E           Expected stdout: 'yes\nno'
E           Actual stdout:   'yes\n'
E           Expected stderr: None
E           Actual stderr:   "chmod: invalid option -- 'r'\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo '1' > $TMP/testr_yes
E           echo '2' > $TMP/testr_no
E           chmod -r $TMP/testr_no  # remove read permission
E           test -r $TMP/testr_yes && echo 'yes'
E           test -r $TMP/testr_no || echo 'no'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::-w[L268]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091566f0>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='-w', script="rm -f $TMP/testw_*\necho '1' > $TMP/testw_yes\necho '2' > $TMP/testw_no\nchmod -w $TMP/tes...o 'no'", assertions=[Assertion(type='stdout', value='yes\nno', shells=None, variant=None)], line_number=268, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -w (line 268)
E           
E           stdout mismatch:
E             expected: 'yes\nno'
E             actual:   'yes'
E           
E           Expected stdout: 'yes\nno'
E           Actual stdout:   'yes\n'
E           Expected stderr: None
E           Actual stderr:   "chmod: invalid option -- 'w'\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f $TMP/testw_*
E           echo '1' > $TMP/testw_yes
E           echo '2' > $TMP/testw_no
E           chmod -w $TMP/testw_no  # remove write permission
E           test -w $TMP/testw_yes && echo 'yes'
E           test -w $TMP/testw_no || echo 'no'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::-k for sticky bit[L280]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091567b0>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='-k for sticky bit', script='# not isolated: /tmp usually has sticky bit on\n# https://en.wikipedia.org/...sertions=[Assertion(type='stdout', value='status=0\nstatus=1', shells=None, variant=None)], line_number=280, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -k for sticky bit (line 280)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=1'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=1'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # not isolated: /tmp usually has sticky bit on
E           # https://en.wikipedia.org/wiki/Sticky_bit
E           
E           test -k /tmp
E           echo status=$?
E           
E           test -k /bin
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::test -p named pipe[L407]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109156db0>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='test -p named pipe', script='mkfifo $TMP/fifo\ntest -p $TMP/fifo\necho status=$?\n\ntest -p testdata\ne...sertions=[Assertion(type='stdout', value='status=0\nstatus=1', shells=None, variant=None)], line_number=407, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test -p named pipe (line 407)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=1'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=1'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: mkfifo: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkfifo $TMP/fifo
E           test -p $TMP/fifo
E           echo status=$?
E           
E           test -p testdata
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::-u for setuid, -g too[L442]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109156f30>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='-u for setuid, -g too', script='touch $TMP/setuid $TMP/setgid\nchmod u+s $TMP/setuid\nchmod g+s $TMP/se...type='stdout', value='status=0\nstatus=1\nstatus=1\nstatus=0', shells=None, variant=None)], line_number=442, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -u for setuid, -g too (line 442)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=1\nstatus=1\nstatus=0'
E             actual:   'status=1\nstatus=1\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=1\nstatus=1\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch $TMP/setuid $TMP/setgid
E           chmod u+s $TMP/setuid
E           chmod g+s $TMP/setgid
E           
E           test -u $TMP/setuid
E           echo status=$?
E           
E           test -u $TMP/setgid
E           echo status=$?
E           
E           test -g $TMP/setuid
E           echo status=$?
E           
E           test -g $TMP/setgid
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::-ef[L534]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109157230>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='-ef', script='left=$TMP/left\nright=$TMP/right\ntouch $left $right\n\nln -f $TMP/left $TMP/hardlink\n\n...rtion(type='stdout', value='same\nsame\ndifferent\ndifferent', shells=None, variant=None)], line_number=534, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -ef (line 534)
E           
E           stdout mismatch:
E             expected: 'same\nsame\ndifferent\ndifferent'
E             actual:   'same\ndifferent\ndifferent'
E           
E           Expected stdout: 'same\nsame\ndifferent\ndifferent'
E           Actual stdout:   'same\ndifferent\ndifferent\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           left=$TMP/left
E           right=$TMP/right
E           touch $left $right
E           
E           ln -f $TMP/left $TMP/hardlink
E           
E           test $left -ef $left && echo same
E           test $left -ef $TMP/hardlink && echo same
E           test $left -ef $right || echo different
E           
E           test $TMP/__nonexistent -ef $right || echo different
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::test -c[L573]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109157470>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='test -c', script='test -c /dev/zero\necho status=$?', assertions=[Assertion(type='stdout', value='status=0', shells=None, variant=None)], line_number=573, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: test -c (line 573)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           test -c /dev/zero
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-bracket.test.sh::Looks like octal, but digit is too big[L710]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091578f0>
test_file = 'builtin-bracket.test.sh'
test_case = TestCase(name='Looks like octal, but digit is too big', script='# arithmetic has octal conversion\necho $(( 083 ))\nec...value='83\nstatus=0\n-83\nstatus=0\n\nstatus=0\nstatus=0', shells=['mksh'], variant='OK')], line_number=710, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Looks like octal, but digit is too big (line 710)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1\n\nstatus=0\nstatus=0'
E             actual:   '83\nstatus=0\n-83\nstatus=0\n\nstatus=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1\n\nstatus=0\nstatus=0'
E           Actual stdout:   '83\nstatus=0\n-83\nstatus=0\n\nstatus=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # arithmetic has octal conversion
E           echo $(( 083 ))
E           echo status=$?
E           
E           echo $(( -083 ))
E           echo status=$?
E           
E           echo
E           
E           # Bracket does NOT have octal conversion!  That is annoying.
E           [ 083 -eq 83 ]
E           echo status=$?
E           
E           [ -083 -eq -83 ]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::cd BAD/..[L10]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109157b30>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='cd BAD/..', script="# Odd divergence in shells: dash and mksh normalize the path and don't check\n# thi... Assertion(type='stdout', value='status=0', shells=['dash', 'ash', 'mksh'], variant='BUG')], line_number=10, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: cd BAD/.. (line 10)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Odd divergence in shells: dash and mksh normalize the path and don't check
E           # this error.
E           # TODO: I would like OSH to behave like bash and zsh, but separating chdir_arg
E           # and pwd_arg breaks case 17.
E           
E           cd nonexistent_ZZ/..
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::cd with 2 or more args - with strict_arg_parse[L26]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109157bf0>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='cd with 2 or more args - with strict_arg_parse', script="shopt -s strict_arg_parse\n\nmkdir -p foo\ncd ...ssertion(type='stdout', value='status=0\nstatus=0', shells=['dash', 'ash'], variant='N-I')], line_number=26, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: cd with 2 or more args - with strict_arg_parse (line 26)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=0\nfailed with multiple args'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'status=0\nstatus=0\nfailed with multiple args'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s strict_arg_parse
E           
E           mkdir -p foo
E           cd foo
E           echo status=$?
E           cd ..
E           echo status=$?
E           
E           
E           cd foo bar
E           st=$?
E           if test $st -ne 0; then
E             echo 'failed with multiple args'
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::cd - without OLDPWD[L64]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109157d70>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='cd - without OLDPWD', script='cd - > /dev/null  # silence dash output\necho status=$?\n#pwd', assertion..., Assertion(type='stdout', value='status=0', shells=['dash', 'ash', 'zsh'], variant='BUG')], line_number=64, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: cd - without OLDPWD (line 64)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd - > /dev/null  # silence dash output
E           echo status=$?
E           #pwd
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::pwd in symlinked dir on shell initialization[L216]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917c530>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='pwd in symlinked dir on shell initialization', script="tmp=$TMP/builtins-pwd-2\nmkdir -p $tmp\nmkdir -p...mksh'], variant='OK'), Assertion(type='stderr-json', value='', shells=None, variant=None)], line_number=216, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pwd in symlinked dir on shell initialization (line 216)
E           
E           stdout mismatch:
E             expected: 'symlink\ntarget'
E             actual:   'symlink\nsymlink'
E           
E           Expected stdout: 'symlink\ntarget'
E           Actual stdout:   'symlink\nsymlink\n'
E           Expected stderr: ''
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           tmp=$TMP/builtins-pwd-2
E           mkdir -p $tmp
E           mkdir -p $tmp/target
E           ln -s -f $tmp/target $tmp/symlink
E           
E           cd $tmp/symlink
E           $SH -c 'basename $(pwd)'
E           unset PWD
E           $SH -c 'basename $(pwd)'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::Test the current directory after 'cd ..' involving symlinks[L237]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917c5f0>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name="Test the current directory after 'cd ..' involving symlinks", script='dir=$TMP/symlinktest\nmkdir -p $d...n b/)\nls', assertions=[Assertion(type='stdout', value='a\nc', shells=None, variant=None)], line_number=237, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Test the current directory after 'cd ..' involving symlinks (line 237)
E           
E           stdout mismatch:
E             expected: 'a\nc'
E             actual:   'a  c'
E           
E           Expected stdout: 'a\nc'
E           Actual stdout:   'a  c\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           dir=$TMP/symlinktest
E           mkdir -p $dir
E           cd $dir
E           mkdir -p a/b/c
E           mkdir -p a/b/d
E           ln -s -f a/b/c c > /dev/null
E           cd c
E           cd ..
E           # Expecting a c/ (since we are in symlinktest) but osh gives c d (thinks we are
E           # in b/)
E           ls
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::cd permits double bare dash[L280]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917c8f0>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='cd permits double bare dash', script='cd -- /\necho $PWD', assertions=[Assertion(type='stdout', value='/', shells=None, variant=None)], line_number=280, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: cd permits double bare dash (line 280)
E           
E           stdout mismatch:
E             expected: '/'
E             actual:   '/tmp'
E           
E           Expected stdout: '/'
E           Actual stdout:   '/tmp\n'
E           Expected stderr: None
E           Actual stderr:   'bash: cd: --: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd -- /
E           echo $PWD
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::cd to symlink with -L and -P[L285]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917c9b0>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='cd to symlink with -L and -P', script='targ=$TMP/cd-symtarget\nlnk=$TMP/cd-symlink\nmkdir -p $targ\nln ...PWD', assertions=[Assertion(type='stdout', value='OK\nOK\nOK', shells=None, variant=None)], line_number=285, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: cd to symlink with -L and -P (line 285)
E           
E           stdout mismatch:
E             expected: 'OK\nOK\nOK'
E             actual:   'OK\nOK\n/tmp/cd-symlink'
E           
E           Expected stdout: 'OK\nOK\nOK'
E           Actual stdout:   'OK\nOK\n/tmp/cd-symlink\n'
E           Expected stderr: None
E           Actual stderr:   'bash: cd: -L: No such file or directory\nbash: cd: -P: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           targ=$TMP/cd-symtarget
E           lnk=$TMP/cd-symlink
E           mkdir -p $targ
E           ln -s $targ $lnk
E           
E           # -L behavior is the default
E           cd $lnk
E           test $PWD = "$TMP/cd-symlink" && echo OK
E           
E           cd -L $lnk
E           test $PWD = "$TMP/cd-symlink" && echo OK
E           
E           cd -P $lnk
E           test $PWD = "$TMP/cd-symtarget" && echo OK || echo $PWD
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::cd to relative path with -L and -P[L306]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917ca70>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='cd to relative path with -L and -P', script='die() { echo "$@"; exit 1; }\n\ntarg=$TMP/cd-symtarget/sub...PWD', assertions=[Assertion(type='stdout', value='OK\nOK\nOK', shells=None, variant=None)], line_number=306, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: cd to relative path with -L and -P (line 306)
E           
E           stdout mismatch:
E             expected: 'OK\nOK\nOK'
E             actual:   'OK\n/tmp/cd-symlink/subdir'
E           
E           Expected stdout: 'OK\nOK\nOK'
E           Actual stdout:   'OK\n/tmp/cd-symlink/subdir\n'
E           Expected stderr: None
E           Actual stderr:   'bash: cd: -L: No such file or directory\nbash: cd: -P: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           die() { echo "$@"; exit 1; }
E           
E           targ=$TMP/cd-symtarget/subdir
E           lnk=$TMP/cd-symlink
E           mkdir -p $targ
E           ln -s $TMP/cd-symtarget $lnk
E           
E           # -L behavior is the default
E           cd $lnk/subdir
E           test $PWD = "$TMP/cd-symlink/subdir" || die "failed"
E           cd ..
E           test $PWD = "$TMP/cd-symlink" && echo OK
E           
E           cd $lnk/subdir
E           test $PWD = "$TMP/cd-symlink/subdir" || die "failed"
E           cd -L ..
E           test $PWD = "$TMP/cd-symlink" && echo OK
E           
E           cd $lnk/subdir
E           test $PWD = "$TMP/cd-symlink/subdir" || die "failed"
E           cd -P ..
E           test $PWD = "$TMP/cd-symtarget" && echo OK || echo $PWD
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::CDPATH is respected[L344]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917cbf0>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='CDPATH is respected', script="mkdir -p /tmp/spam/foo /tmp/eggs/foo\n\nCDPATH='/tmp/spam:/tmp/eggs'\n\nc... Assertion(type='stdout', value='status=0\n/tmp/spam/foo', shells=['zsh'], variant='BUG')], line_number=344, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: CDPATH is respected (line 344)
E           
E           stdout mismatch:
E             expected: '/tmp/spam/foo\nstatus=0\n/tmp/spam/foo'
E             actual:   'status=1\n/tmp'
E           
E           Expected stdout: '/tmp/spam/foo\nstatus=0\n/tmp/spam/foo'
E           Actual stdout:   'status=1\n/tmp\n'
E           Expected stderr: None
E           Actual stderr:   'bash: cd: foo: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p /tmp/spam/foo /tmp/eggs/foo
E           
E           CDPATH='/tmp/spam:/tmp/eggs'
E           
E           cd foo
E           echo status=$?
E           pwd
E           
E           
E           # doesn't print the dir
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::Change directory in non-shell parent process (make or Python)[L367]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917ccb0>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='Change directory in non-shell parent process (make or Python)', script='# inspired by Perl package bug\...ssertions=[Assertion(type='stdout', value='/cpan/Encode/Byte', shells=None, variant=None)], line_number=367, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Change directory in non-shell parent process (make or Python) (line 367)
E           
E           stdout mismatch:
E             expected: '/cpan/Encode/Byte'
E             actual:   ''
E           
E           Expected stdout: '/cpan/Encode/Byte'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # inspired by Perl package bug
E           
E           old_dir=$(pwd)
E           
E           mkdir -p cpan/Encode/Byte
E           
E           # Simulate make changing the dir
E           wrapped_chdir() {
E             #set -- $SH -c 'echo BEFORE; pwd; echo CD; cd Byte; echo AFTER; pwd'
E           
E             set -- $SH -c 'cd Byte; pwd'
E             # strace comes out the same - one getcwd() and one chdir()
E             #set -- strace -e 'getcwd,chdir' "$@"
E           
E             python2 -c '
E           from __future__ import print_function
E           import os, sys, subprocess
E           
E           argv = sys.argv[1:]
E           print("Python PWD = %r" % os.getenv("PWD"), file=sys.stderr)
E           print("Python argv = %r" % argv, file=sys.stderr)
E           
E           os.chdir("cpan/Encode")
E           subprocess.check_call(argv)
E           ' "$@"
E           }
E           
E           #wrapped_chdir
E           new_dir=$(wrapped_chdir)
E           
E           #echo $old_dir
E           
E           # Make the test insensitive to absolute paths
E           echo "${new_dir##$old_dir}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-cd.test.sh::What happens when inherited $PWD and current dir disagree?[L409]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917cd70>
test_file = 'builtin-cd.test.sh'
test_case = TestCase(name='What happens when inherited $PWD and current dir disagree?', script='DIR=/tmp/osh-spec-cd\nmkdir -p $DI...-cd/cpan/Encode/Byte\n/tmp/osh-spec-cd/cpan/Encode/Byte', shells=['mksh'], variant='BUG')], line_number=409, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: What happens when inherited $PWD and current dir disagree? (line 409)
E           
E           stdout mismatch:
E             expected: "Python PWD = '/tmp/osh-spec-cd'\nPWD = /tmp/osh-spec-cd/cpan/Encode\n/tmp/osh-spec-cd/cpan/Encode\ncd=0\nPWD = /tmp/osh-spec-cd/cpan/Encode/Byte\n/tmp/osh-spec-cd/cpan/Encode/Byte"
E             actual:   ''
E           
E           Expected stdout: "Python PWD = '/tmp/osh-spec-cd'\nPWD = /tmp/osh-spec-cd/cpan/Encode\n/tmp/osh-spec-cd/cpan/Encode\ncd=0\nPWD = /tmp/osh-spec-cd/cpan/Encode/Byte\n/tmp/osh-spec-cd/cpan/Encode/Byte"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: python2: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           DIR=/tmp/osh-spec-cd
E           mkdir -p $DIR
E           cd $DIR
E           
E           old_dir=$(pwd)
E           
E           mkdir -p cpan/Encode/Byte
E           
E           # Simulate make changing the dir
E           wrapped_chdir() {
E             #set -- $SH -c 'echo BEFORE; pwd; echo CD; cd Byte; echo AFTER; pwd'
E           
E             # disagreement before we gert here
E             set -- $SH -c '
E           echo "PWD = $PWD"; pwd
E           cd Byte; echo cd=$?
E           echo "PWD = $PWD"; pwd
E           '
E           
E             # strace comes out the same - one getcwd() and one chdir()
E             #set -- strace -e 'getcwd,chdir' "$@"
E           
E             python2 -c '
E           from __future__ import print_function
E           import os, sys, subprocess
E           
E           argv = sys.argv[1:]
E           print("Python argv = %r" % argv, file=sys.stderr)
E           
E           os.chdir("cpan/Encode")
E           print("Python PWD = %r" % os.getenv("PWD"), file=sys.stdout)
E           sys.stdout.flush()
E           
E           subprocess.check_call(argv)
E           ' "$@"
E           }
E           
E           #unset PWD
E           wrapped_chdir
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::complete with no args and complete -p both print completion spec[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d130>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='complete with no args and complete -p both print completion spec', script="set -e\n\ncomplete\n\ncomple...and\ncomplete -W 'foo bar' mycommand\ncomplete -F myfunc other", shells=None, variant=None)], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: complete with no args and complete -p both print completion spec (line 4)
E           
E           stdout mismatch:
E             expected: "complete -W 'foo bar' mycommand\ncomplete -W 'foo bar' mycommand\ncomplete -F myfunc other"
E             actual:   ''
E           
E           Expected stdout: "complete -W 'foo bar' mycommand\ncomplete -W 'foo bar' mycommand\ncomplete -F myfunc other"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: complete: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -e
E           
E           complete
E           
E           complete -W 'foo bar' mycommand
E           
E           complete -p
E           
E           complete -F myfunc other
E           
E           complete
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::complete -F f is usage error[L23]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d1f0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='complete -F f is usage error', script='#complete -F f cmd\n\n# Alias for complete -p\ncomplete > /dev/n...ssertions=[Assertion(type='stdout', value='status=0\nstatus=2', shells=None, variant=None)], line_number=23, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: complete -F f is usage error (line 23)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=2'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=2'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: complete: command not found\nbash: complete: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           #complete -F f cmd
E           
E           # Alias for complete -p
E           complete > /dev/null  # ignore OSH output for now
E           echo status=$?
E           
E           # But this is an error
E           complete -F f
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::complete with nonexistent function[L39]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d2b0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='complete with nonexistent function', script='complete -F invalidZZ -D\necho status=$?', assertions=[Ass... variant=None), Assertion(type='stdout', value='status=0', shells=['bash'], variant='BUG')], line_number=39, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: complete with nonexistent function (line 39)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: complete: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           complete -F invalidZZ -D
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::complete with no action[L45]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d370>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='complete with no action', script='complete foo\necho status=$?', assertions=[Assertion(type='stdout', v... variant=None), Assertion(type='stdout', value='status=0', shells=['bash'], variant='BUG')], line_number=45, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: complete with no action (line 45)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: complete: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           complete foo
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::-A function prints functions[L51]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d430>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='-A function prints functions', script='add () { expr 4 + 4; }\ndiv () { expr 6 / 2; }\nek () { echo hel...(type='stdout', value='__ec\n_ab\nadd\ndiv\nek\n--\n__ec\n_ab', shells=None, variant=None)], line_number=51, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -A function prints functions (line 51)
E           
E           stdout mismatch:
E             expected: '__ec\n_ab\nadd\ndiv\nek\n--\n__ec\n_ab'
E             actual:   '--'
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '__ec\n_ab\nadd\ndiv\nek\n--\n__ec\n_ab'
E           Actual stdout:   '--\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\nbash: compgen: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           add () { expr 4 + 4; }
E           div () { expr 6 / 2; }
E           ek () { echo hello; }
E           __ec () { echo hi; }
E           _ab () { expr 10 % 3; }
E           compgen -A function
E           echo --
E           compgen -A function _
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::Invalid syntax[L72]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d4f0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='Invalid syntax', script='compgen -A foo\necho status=$?', assertions=[Assertion(type='stdout', value='status=2', shells=None, variant=None)], line_number=72, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Invalid syntax (line 72)
E           
E           stdout mismatch:
E             expected: 'status=2'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=2'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           compgen -A foo
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::how compgen calls completion functions[L77]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d5b0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='how compgen calls completion functions', script='foo_complete() {\n  # first, cur, prev\n  argv.py argv...-1']\n['COMP_LINE', '']\n['COMP_POINT', '0']\none\ntwo\nthree", shells=None, variant=None)], line_number=77, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: how compgen calls completion functions (line 77)
E           
E           stdout mismatch:
E             expected: "['argv', 'compgen', 'foo', '']\n['COMP_WORDS']\n['COMP_CWORD', '-1']\n['COMP_LINE', '']\n['COMP_POINT', '0']\none\ntwo\nthree"
E             actual:   ''
E           
E           Expected stdout: "['argv', 'compgen', 'foo', '']\n['COMP_WORDS']\n['COMP_CWORD', '-1']\n['COMP_LINE', '']\n['COMP_POINT', '0']\none\ntwo\nthree"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           foo_complete() {
E             # first, cur, prev
E             argv.py argv "$@"
E             argv.py COMP_WORDS "${COMP_WORDS[@]}"
E             argv.py COMP_CWORD "${COMP_CWORD}"
E             argv.py COMP_LINE "${COMP_LINE}"
E             argv.py COMP_POINT "${COMP_POINT}"
E             #return 124
E             COMPREPLY=(one two three)
E           }
E           compgen -F foo_complete foo a b c
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::complete -o -F (git)[L100]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d670>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='complete -o -F (git)', script='foo() { echo foo; }\nwrapper=foo\ncomplete -o default -o nospace -F $wrapper git', assertions=[Assertion(type='status', value=0, shells=None, variant=None)], line_number=100, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: complete -o -F (git) (line 100)
E           
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: complete: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           foo() { echo foo; }
E           wrapper=foo
E           complete -o default -o nospace -F $wrapper git
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compopt with invalid syntax[L106]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d730>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compopt with invalid syntax', script='compopt -o invalid\necho status=$?', assertions=[Assertion(type='stdout', value='status=2', shells=None, variant=None)], line_number=106, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compopt with invalid syntax (line 106)
E           
E           stdout mismatch:
E             expected: 'status=2'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=2'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compopt: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           compopt -o invalid
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -f[L121]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917d970>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -f', script='mkdir -p $TMP/compgen\ntouch $TMP/compgen/{one,two,three}\ncd $TMP/compgen\ncompge...ertion(type='stdout', value='one\nthree\ntwo\n--\nthree\ntwo', shells=None, variant=None)], line_number=121, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -f (line 121)
E           
E           stdout mismatch:
E             expected: 'one\nthree\ntwo\n--\nthree\ntwo'
E             actual:   '--'
E           
E           Expected stdout: 'one\nthree\ntwo\n--\nthree\ntwo'
E           Actual stdout:   '--\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p $TMP/compgen
E           touch $TMP/compgen/{one,two,three}
E           cd $TMP/compgen
E           compgen -f | sort
E           echo --
E           compgen -f t | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -v with local vars[L137]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917da30>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -v with local vars', script='v1_global=0\nf() {\n  local v2_local=0\t \n  compgen -v v\n}\nf', assertions=[Assertion(type='stdout', value='v1_global\nv2_local', shells=None, variant=None)], line_number=137, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -v with local vars (line 137)
E           
E           stdout mismatch:
E             expected: 'v1_global\nv2_local'
E             actual:   ''
E           
E           Expected stdout: 'v1_global\nv2_local'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           v1_global=0
E           f() {
E             local v2_local=0	 
E             compgen -v v
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -v P[L154]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917dbb0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -v P', script="cd > /dev/null  # for some reason in bash, this makes PIPESTATUS appear!\ncompge...sort", assertions=[Assertion(type='stdout', value='PATH\nPWD', shells=None, variant=None)], line_number=154, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -v P (line 154)
E           
E           stdout mismatch:
E             expected: 'PATH\nPWD'
E             actual:   ''
E           
E           Expected stdout: 'PATH\nPWD'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd > /dev/null  # for some reason in bash, this makes PIPESTATUS appear!
E           compgen -v P | grep -E '^PATH|PWD' | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -e with global/local exported vars[L162]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917dc70>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -e with global/local exported vars', script='export v1_global=0\nf() {\n  local v2_local=0\n  e...ertions=[Assertion(type='stdout', value='v1_global\nv2_local', shells=None, variant=None)], line_number=162, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -e with global/local exported vars (line 162)
E           
E           stdout mismatch:
E             expected: 'v1_global\nv2_local'
E             actual:   ''
E           
E           Expected stdout: 'v1_global\nv2_local'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           export v1_global=0
E           f() {
E             local v2_local=0
E             export v2_local
E             compgen -e v
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -e P[L186]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917deb0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -e P', script="cd > /dev/null  # for some reason in bash, this makes PIPESTATUS appear!\ncompge...sort", assertions=[Assertion(type='stdout', value='PATH\nPWD', shells=None, variant=None)], line_number=186, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -e P (line 186)
E           
E           stdout mismatch:
E             expected: 'PATH\nPWD'
E             actual:   ''
E           
E           Expected stdout: 'PATH\nPWD'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd > /dev/null  # for some reason in bash, this makes PIPESTATUS appear!
E           compgen -e P | grep -E '^PATH|PWD' | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen with actions: function / variable / file[L194]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917df70>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen with actions: function / variable / file', script='mkdir -p $TMP/compgen2\ntouch $TMP/compgen2/...ions=[Assertion(type='stdout', value='PA_FUNC\nPATH\nPA_FILE', shells=None, variant=None)], line_number=194, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen with actions: function / variable / file (line 194)
E           
E           stdout mismatch:
E             expected: 'PA_FUNC\nPATH\nPA_FILE'
E             actual:   ''
E           
E           Expected stdout: 'PA_FUNC\nPATH\nPA_FILE'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           mkdir -p $TMP/compgen2
E           touch $TMP/compgen2/{PA,Q}_FILE
E           cd $TMP/compgen2  # depends on previous test above!
E           PA_FUNC() { echo P; }
E           Q_FUNC() { echo Q; }
E           compgen -A function -A variable -A file PA
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen with actions: alias, setopt[L207]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e030>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen with actions: alias, setopt', script="alias v_alias='ls'\nalias v_alias2='ls'\nalias a1='ls'\nc...sertion(type='stdout', value='v_alias\nv_alias2\nverbose\nvi', shells=None, variant=None)], line_number=207, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen with actions: alias, setopt (line 207)
E           
E           stdout mismatch:
E             expected: 'v_alias\nv_alias2\nverbose\nvi'
E             actual:   ''
E           
E           Expected stdout: 'v_alias\nv_alias2\nverbose\nvi'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           alias v_alias='ls'
E           alias v_alias2='ls'
E           alias a1='ls'
E           compgen -A alias -A setopt v
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen with actions: shopt[L220]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e0f0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen with actions: shopt', script='compgen -A shopt -P [ -S ] nu', assertions=[Assertion(type='stdout', value='[nullglob]', shells=None, variant=None)], line_number=220, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen with actions: shopt (line 220)
E           
E           stdout mismatch:
E             expected: '[nullglob]'
E             actual:   ''
E           
E           Expected stdout: '[nullglob]'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           compgen -A shopt -P [ -S ] nu
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen with action and suffix: helptopic[L226]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e1b0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen with action and suffix: helptopic', script='compgen -A helptopic -S ___ fal', assertions=[Assertion(type='stdout', value='false___', shells=None, variant=None)], line_number=226, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen with action and suffix: helptopic (line 226)
E           
E           stdout mismatch:
E             expected: 'false___'
E             actual:   ''
E           
E           Expected stdout: 'false___'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           compgen -A helptopic -S ___ fal
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -A directory[L232]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e270>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -A directory', script='# Create test directories inline\nmkdir -p /tmp/completion-test/client /...ssertions=[Assertion(type='stdout', value='client\ncore\ncpp', shells=None, variant=None)], line_number=232, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -A directory (line 232)
E           
E           stdout mismatch:
E             expected: 'client\ncore\ncpp'
E             actual:   ''
E           
E           Expected stdout: 'client\ncore\ncpp'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Create test directories inline
E           mkdir -p /tmp/completion-test/client /tmp/completion-test/core /tmp/completion-test/cpp
E           cd /tmp/completion-test
E           compgen -A directory c | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -A file[L243]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e330>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -A file', script='# Create test files/directories inline\nmkdir -p /tmp/completion-test2/opy /t...=[Assertion(type='stdout', value='oils-version.txt\nopy\nosh', shells=None, variant=None)], line_number=243, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -A file (line 243)
E           
E           stdout mismatch:
E             expected: 'oils-version.txt\nopy\nosh'
E             actual:   ''
E           
E           Expected stdout: 'oils-version.txt\nopy\nosh'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Create test files/directories inline
E           mkdir -p /tmp/completion-test2/opy /tmp/completion-test2/osh
E           touch /tmp/completion-test2/oils-version.txt
E           cd /tmp/completion-test2
E           compgen -A file o | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -A user[L255]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e3f0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -A user', script="# no assertion because this isn't hermetic\ncompgen -A user", assertions=[Assertion(type='status', value=0, shells=None, variant=None)], line_number=255, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -A user (line 255)
E           
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           # no assertion because this isn't hermetic
E           compgen -A user
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -A command completes external commands[L260]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e4b0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -A command completes external commands', script="# NOTE: this test isn't hermetic\ncompgen -A c... assertions=[Assertion(type='stdout', value='xargs\nstatus=0', shells=None, variant=None)], line_number=260, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -A command completes external commands (line 260)
E           
E           stdout mismatch:
E             expected: 'xargs\nstatus=0'
E             actual:   'status=0'
E           
E           Expected stdout: 'xargs\nstatus=0'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: this test isn't hermetic
E           compgen -A command xarg | uniq
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -A command completes functions and aliases[L269]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e570>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -A command completes functions and aliases', script="our_func() { echo ; }\nour_func2() { echo ...tatus=0\nour_alias\nour_func\nour_func2\nour_func3\nstatus=0', shells=None, variant=None)], line_number=269, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -A command completes functions and aliases (line 269)
E           
E           stdout mismatch:
E             expected: 'our_alias\nour_func\nour_func2\nstatus=0\nour_alias\nour_func\nour_func2\nour_func3\nstatus=0'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'our_alias\nour_func\nour_func2\nstatus=0\nour_alias\nour_func\nour_func2\nour_func3\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\nbash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           our_func() { echo ; }
E           our_func2() { echo ; }
E           alias our_alias=foo
E           
E           compgen -A command our_
E           echo status=$?
E           
E           # Introduce another function.  Note that we're missing test coverage for
E           # 'complete', i.e. bug #1064.
E           our_func3() { echo ; }
E           
E           compgen -A command our_
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -A command completes builtins and keywords[L296]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e630>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -A command completes builtins and keywords', script='compgen -A command eva\necho status=$?\nco...ertion(type='stdout', value='eval\nstatus=0\nwhile\nstatus=0', shells=None, variant=None)], line_number=296, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -A command completes builtins and keywords (line 296)
E           
E           stdout mismatch:
E             expected: 'eval\nstatus=0\nwhile\nstatus=0'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'eval\nstatus=0\nwhile\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\nbash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           compgen -A command eva
E           echo status=$?
E           compgen -A command whil
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -k shows the same keywords as bash[L308]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e6f0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -k shows the same keywords as bash', script="# bash adds ]] and } and coproc\n\n# Use bash as a...c\nfi\nfor\nfunction\nif\nin\nthen\ntime\nuntil\nwhile\n{\n}', shells=None, variant=None)], line_number=308, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -k shows the same keywords as bash (line 308)
E           
E           stdout mismatch:
E             expected: '!\n[[\n]]\ncase\ndo\ndone\nelif\nelse\nesac\nfi\nfor\nfunction\nif\nin\nthen\ntime\nuntil\nwhile\n{\n}'
E             actual:   ''
E           
E           Expected stdout: '!\n[[\n]]\ncase\ndo\ndone\nelif\nelse\nesac\nfi\nfor\nfunction\nif\nin\nthen\ntime\nuntil\nwhile\n{\n}'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # bash adds ]] and } and coproc
E           
E           # Use bash as an oracle
E           bash -c 'compgen -k' | sort > bash.txt
E           
E           # osh vs. bash, or bash vs. bash
E           $SH -c 'compgen -k' | sort > this-shell.txt
E           
E           #comm bash.txt this-shell.txt
E           
E           # show lines in both files
E           comm -12 bash.txt this-shell.txt | egrep -v 'coproc|select'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -k completes reserved shell keywords[L375]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e870>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -k completes reserved shell keywords', script='compgen -k do | sort\necho status=$?\ncompgen -k...pe='stdout', value='do\ndone\nstatus=0\nelif\nelse\nstatus=0', shells=None, variant=None)], line_number=375, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -k completes reserved shell keywords (line 375)
E           
E           stdout mismatch:
E             expected: 'do\ndone\nstatus=0\nelif\nelse\nstatus=0'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'do\ndone\nstatus=0\nelif\nelse\nstatus=0'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           compgen -k do | sort
E           echo status=$?
E           compgen -k el | sort
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::-o filenames and -o nospace have no effect with compgen[L389]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e930>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='-o filenames and -o nospace have no effect with compgen', script="# they are POSTPROCESSING.\ncompgen -...ld'", assertions=[Assertion(type='stdout', value='bin\nbuild', shells=None, variant=None)], line_number=389, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -o filenames and -o nospace have no effect with compgen (line 389)
E           
E           stdout mismatch:
E             expected: 'bin\nbuild'
E             actual:   ''
E           
E           Expected stdout: 'bin\nbuild'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # they are POSTPROCESSING.
E           compgen -o filenames -o nospace -W 'bin build'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::-o plusdirs and -o dirnames with compgen[L397]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917e9f0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='-o plusdirs and -o dirnames with compgen', script="# Create test directories inline\nmkdir -p /tmp/plus...s\nbin\nbuild\nbuiltin\n---\nbenchmarks\nbin\nbuild\nbuiltin', shells=None, variant=None)], line_number=397, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -o plusdirs and -o dirnames with compgen (line 397)
E           
E           stdout mismatch:
E             expected: 'b1\nb2\nbenchmarks\nbin\nbuild\nbuiltin\n---\nbenchmarks\nbin\nbuild\nbuiltin'
E             actual:   '---'
E           
E           Expected stdout: 'b1\nb2\nbenchmarks\nbin\nbuild\nbuiltin\n---\nbenchmarks\nbin\nbuild\nbuiltin'
E           Actual stdout:   '---\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Create test directories inline
E           mkdir -p /tmp/plusdirs-test/benchmarks /tmp/plusdirs-test/bin /tmp/plusdirs-test/build /tmp/plusdirs-test/builtin
E           cd /tmp/plusdirs-test
E           compgen -o plusdirs -W 'a b1 b2' b | sort
E           echo ---
E           compgen -o dirnames b | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -o default completes files and dirs[L418]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917eab0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -o default completes files and dirs', script='# Create test structure inline\nmkdir -p /tmp/com...-posix.test.sh\nspec/toysh.test.sh\nspec/type-compat.test.sh', shells=None, variant=None)], line_number=418, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -o default completes files and dirs (line 418)
E           
E           stdout mismatch:
E             expected: 'spec/temp-binding.test.sh\nspec/testdata\nspec/tilde.test.sh\nspec/toysh-posix.test.sh\nspec/toysh.test.sh\nspec/type-compat.test.sh'
E             actual:   ''
E           
E           Expected stdout: 'spec/temp-binding.test.sh\nspec/testdata\nspec/tilde.test.sh\nspec/toysh-posix.test.sh\nspec/toysh.test.sh\nspec/type-compat.test.sh'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Create test structure inline
E           mkdir -p /tmp/compgen-default-test/spec/testdata
E           touch /tmp/compgen-default-test/spec/temp-binding.test.sh
E           touch /tmp/compgen-default-test/spec/tilde.test.sh
E           touch /tmp/compgen-default-test/spec/toysh-posix.test.sh
E           touch /tmp/compgen-default-test/spec/toysh.test.sh
E           touch /tmp/compgen-default-test/spec/type-compat.test.sh
E           cd /tmp/compgen-default-test
E           compgen -o default spec/t | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen doesn't respect -X for user-defined functions[L437]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917eb70>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name="compgen doesn't respect -X for user-defined functions", script='# Test that -X filter works with -F cal...ns=[Assertion(type='stdout', value='one\nthree\n--\ntwo\nbin', shells=None, variant=None)], line_number=437, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen doesn't respect -X for user-defined functions (line 437)
E           
E           stdout mismatch:
E             expected: 'one\nthree\n--\ntwo\nbin'
E             actual:   '--'
E           
E           Expected stdout: 'one\nthree\n--\ntwo\nbin'
E           Actual stdout:   '--\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\nbash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # Test that -X filter works with -F callback
E           shopt -s extglob
E           fun() {
E             COMPREPLY=(one two three bin)
E           }
E           compgen -X "@(two|bin)" -F fun
E           echo --
E           compgen -X "!@(two|bin)" -F fun
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -W words -X filter[L454]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917ec30>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -W words -X filter', script="# Extglob patterns work in compgen -X filter\nshopt -s extglob\nco...in'", assertions=[Assertion(type='stdout', value='one\nthree', shells=None, variant=None)], line_number=454, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -W words -X filter (line 454)
E           
E           stdout mismatch:
E             expected: 'one\nthree'
E             actual:   ''
E           
E           Expected stdout: 'one\nthree'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # Extglob patterns work in compgen -X filter
E           shopt -s extglob
E           compgen -X '@(two|bin)' -W 'one two three bin'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -f -X filter -- $cur[L463]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917ecf0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -f -X filter -- $cur', script="# Negated extglob filter: keep only files matching *.py\nshopt -...ssertion(type='stdout', value='spam.py\nspam.sh\n--\nspam.py', shells=None, variant=None)], line_number=463, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -f -X filter -- $cur (line 463)
E           
E           stdout mismatch:
E             expected: 'spam.py\nspam.sh\n--\nspam.py'
E             actual:   '--'
E           
E           Expected stdout: 'spam.py\nspam.sh\n--\nspam.py'
E           Actual stdout:   '--\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # Negated extglob filter: keep only files matching *.py
E           shopt -s extglob
E           cd $TMP
E           touch spam.py spam.sh
E           compgen -f -- sp | sort
E           echo --
E           compgen -f -X '!*.@(py)' -- sp
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen doesn't need shell quoting[L478]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917edb0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name="compgen doesn't need shell quoting", script='# There is an obsolete comment in bash_completion that cla...assertions=[Assertion(type='stdout', value="foo bar\nfoo'bar", shells=None, variant=None)], line_number=478, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen doesn't need shell quoting (line 478)
E           
E           stdout mismatch:
E             expected: "foo bar\nfoo'bar"
E             actual:   ''
E           
E           Expected stdout: "foo bar\nfoo'bar"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\nbash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # There is an obsolete comment in bash_completion that claims the opposite.
E           cd $TMP
E           touch 'foo bar'
E           touch "foo'bar"
E           compgen -f "foo b"
E           compgen -f "foo'"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -W 'one two three'[L490]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917ee70>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name="compgen -W 'one two three'", script="# Create test structure inline - needs vendor directory\nmkdir -p ...lue='one\ntwo\nthree\n--\nvendor\nv1\nv2\n--\nvendor\nv1\nv2', shells=None, variant=None)], line_number=490, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -W 'one two three' (line 490)
E           
E           stdout mismatch:
E             expected: 'one\ntwo\nthree\n--\nvendor\nv1\nv2\n--\nvendor\nv1\nv2'
E             actual:   '--\n--'
E           
E           Expected stdout: 'one\ntwo\nthree\n--\nvendor\nv1\nv2\n--\nvendor\nv1\nv2'
E           Actual stdout:   '--\n--\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\nbash: compgen: command not found\nbash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # Create test structure inline - needs vendor directory
E           mkdir -p /tmp/compgen-w-test/vendor
E           cd /tmp/compgen-w-test
E           compgen -W 'one two three'
E           echo --
E           compgen -W 'v1 v2 three' -A directory v
E           echo --
E           compgen -A directory -W 'v1 v2 three' v  # order doesn't matter
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -W evaluates code in $()[L513]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917ef30>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -W evaluates code in $()', script='IFS=\':%\'\ncompgen -W \'$(echo "spam:eggs%ham cheese")\'', ...ions=[Assertion(type='stdout', value='spam\neggs\nham cheese', shells=None, variant=None)], line_number=513, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -W evaluates code in $() (line 513)
E           
E           stdout mismatch:
E             expected: 'spam\neggs\nham cheese'
E             actual:   ''
E           
E           Expected stdout: 'spam\neggs\nham cheese'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           IFS=':%'
E           compgen -W '$(echo "spam:eggs%ham cheese")'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -W uses IFS, and delimiters are escaped with \\[L522]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917eff0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -W uses IFS, and delimiters are escaped with \\', script="IFS=':%'\ncompgen -W 'spam:eggs%ham c...Assertion(type='stdout', value='spam\neggs\nham cheese:colon', shells=None, variant=None)], line_number=522, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -W uses IFS, and delimiters are escaped with \ (line 522)
E           
E           stdout mismatch:
E             expected: 'spam\neggs\nham cheese:colon'
E             actual:   ''
E           
E           Expected stdout: 'spam\neggs\nham cheese:colon'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           IFS=':%'
E           compgen -W 'spam:eggs%ham cheese\:colon'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::Parse errors for compgen -W and complete -W[L531]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917f0b0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='Parse errors for compgen -W and complete -W', script="# bash doesn't detect as many errors because it l...ne), Assertion(type='stdout', value='status=1\nstatus=0', shells=['bash'], variant='BUG')], line_number=531, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Parse errors for compgen -W and complete -W (line 531)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0'
E             actual:   ''
E           
E           Expected stdout: 'status=1\nstatus=0'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # bash doesn't detect as many errors because it lacks static parsing.
E           compgen -W '${'
E           echo status=$?
E           complete -W '${' foo
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -A builtin[L570]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917f3b0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -A builtin', script='compgen -A builtin g', assertions=[Assertion(type='stdout', value='getopts', shells=None, variant=None)], line_number=570, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -A builtin (line 570)
E           
E           stdout mismatch:
E             expected: 'getopts'
E             actual:   ''
E           
E           Expected stdout: 'getopts'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           compgen -A builtin g
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::complete -C vs. compgen -C[L576]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917f470>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='complete -C vs. compgen -C', script='f() { echo foo; echo bar; }\n\n# Bash prints warnings: -C option m...ertion(type='stdout', value='foo\nbar\ncompgen=0\ncomplete=0', shells=None, variant=None)], line_number=576, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: complete -C vs. compgen -C (line 576)
E           
E           stdout mismatch:
E             expected: 'foo\nbar\ncompgen=0\ncomplete=0'
E             actual:   'compgen=1\ncomplete=1'
E           
E           Expected stdout: 'foo\nbar\ncompgen=0\ncomplete=0'
E           Actual stdout:   'compgen=1\ncomplete=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\nbash: complete: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() { echo foo; echo bar; }
E           
E           # Bash prints warnings: -C option may not work as you expect
E           #                       -F option may not work as you expect
E           #
E           # https://unix.stackexchange.com/questions/117987/compgen-warning-c-option-not-working-as-i-expected
E           #
E           # compexport fixes this problem, because it invokes ShellFuncAction, which
E           # sets COMP_ARGV, COMP_WORDS, etc.
E           #
E           # Should we print a warning?
E           
E           compgen -C f b
E           echo compgen=$?
E           
E           complete -C f b
E           echo complete=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-completion.test.sh::compgen -F with scalar COMPREPLY[L636]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917f6b0>
test_file = 'builtin-completion.test.sh'
test_case = TestCase(name='compgen -F with scalar COMPREPLY', script='_comp_cmd_test() {\n  unset -v COMPREPLY\n  COMPREPLY=hello\...cmd_test', assertions=[Assertion(type='stdout', value='hello', shells=None, variant=None)], line_number=636, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: compgen -F with scalar COMPREPLY (line 636)
E           
E           stdout mismatch:
E             expected: 'hello'
E             actual:   ''
E           
E           Expected stdout: 'hello'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: compgen: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           _comp_cmd_test() {
E             unset -v COMPREPLY
E             COMPREPLY=hello
E           }
E           compgen -F _comp_cmd_test
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::pushd/popd[L5]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917f770>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='pushd/popd', script='set -o errexit\ncd /\npushd /tmp\necho -n pwd=; pwd\npopd\necho -n pwd=; pwd', ass...ant='N-I'), Assertion(type='stdout-json', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=5, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pushd/popd (line 5)
E           
E           stdout mismatch:
E             expected: '~ /\npwd=/tmp\n/\npwd=/'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '~ /\npwd=/tmp\n/\npwd=/'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           cd /
E           pushd /tmp
E           echo -n pwd=; pwd
E           popd
E           echo -n pwd=; pwd
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::pushd usage[L26]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917f830>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='pushd usage', script='pushd -z\necho status=$?\npushd /tmp >/dev/null\necho status=$?\npushd -- /tmp >/...sertion(type='stdout', value='status=1\nstatus=0\nstatus=0', shells=['zsh'], variant='OK')], line_number=26, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pushd usage (line 26)
E           
E           stdout mismatch:
E             expected: 'status=2\nstatus=0\nstatus=0'
E             actual:   'status=1\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=2\nstatus=0\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: pushd: command not found\nbash: pushd: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           pushd -z
E           echo status=$?
E           pushd /tmp >/dev/null
E           echo status=$?
E           pushd -- /tmp >/dev/null
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::popd usage error[L44]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917f8f0>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='popd usage error', script='pushd / >/dev/null\npopd zzz\necho status=$?\n\npopd -- >/dev/null\necho sta...ertion(type='stdout', value='status=0\nstatus=0\nstatus=0', shells=['zsh'], variant='BUG')], line_number=44, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: popd usage error (line 44)
E           
E           stdout mismatch:
E             expected: 'status=2\nstatus=0\nstatus=2'
E             actual:   'status=1\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=2\nstatus=0\nstatus=2'
E           Actual stdout:   'status=1\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: popd: command not found\nbash: popd: command not found\nbash: popd: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           pushd / >/dev/null
E           popd zzz
E           echo status=$?
E           
E           popd -- >/dev/null
E           echo status=$?
E           
E           popd -z
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::popd returns error on empty directory stack[L65]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917f9b0>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='popd returns error on empty directory stack', script='message=$(popd 2>&1)\necho $?\necho "$message" | ...ssertions=[Assertion(type='stdout', value='1\ndirectory stack', shells=None, variant=None)], line_number=65, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: popd returns error on empty directory stack (line 65)
E           
E           stdout mismatch:
E             expected: '1\ndirectory stack'
E             actual:   '0'
E           
E           Expected stdout: '1\ndirectory stack'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           message=$(popd 2>&1)
E           echo $?
E           echo "$message" | grep -o "directory stack"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::cd replaces the lowest entry on the directory stack![L74]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917fa70>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='cd replaces the lowest entry on the directory stack!', script='# stable temp dir\ndir=/tmp/oils-spec/bu...uiltin-dirs\ncd=0\n/ ~/oils-spec/builtin-dirs\npopd=0\npopd=1', shells=None, variant=None)], line_number=74, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: cd replaces the lowest entry on the directory stack! (line 74)
E           
E           stdout mismatch:
E             expected: 'pushd=0\n~ ~/oils-spec/builtin-dirs\ncd=0\n/ ~/oils-spec/builtin-dirs\npopd=0\npopd=1'
E             actual:   'pushd=1\ncd=0\npopd=1\npopd=1'
E           
E           Expected stdout: 'pushd=0\n~ ~/oils-spec/builtin-dirs\ncd=0\n/ ~/oils-spec/builtin-dirs\npopd=0\npopd=1'
E           Actual stdout:   'pushd=1\ncd=0\npopd=1\npopd=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: dirs: command not found\nbash: dirs: command not found\nbash: popd: command not found\nbash: popd: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # stable temp dir
E           dir=/tmp/oils-spec/builtin-dirs
E           
E           mkdir -p $dir
E           cd $dir
E           
E           pushd /tmp >/dev/null
E           echo pushd=$?
E           
E           dirs
E           
E           cd /
E           echo cd=$?
E           
E           dirs
E           
E           popd >/dev/null
E           echo popd=$?
E           
E           popd >/dev/null
E           echo popd=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs builtin[L106]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917fb30>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs builtin', script='cd /\ndirs', assertions=[Assertion(type='status', value=0, shells=None, variant=None), Assertion(type='stdout', value='/', shells=None, variant=None)], line_number=106, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs builtin (line 106)
E           
E           stdout mismatch:
E             expected: '/'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '/'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: dirs: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           cd /
E           dirs
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs -c to clear the stack[L114]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917fbf0>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs -c to clear the stack', script="set -o errexit\ncd /\npushd /tmp >/dev/null  # zsh pushd doesn't p...ariant=None), Assertion(type='stdout', value='--\n~ /\n--\n~', shells=None, variant=None)], line_number=114, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs -c to clear the stack (line 114)
E           
E           stdout mismatch:
E             expected: '--\n~ /\n--\n~'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '--\n~ /\n--\n~'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           cd /
E           pushd /tmp >/dev/null  # zsh pushd doesn't print anything, but bash does
E           echo --
E           dirs
E           dirs -c
E           echo --
E           dirs
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs -v to print numbered stack, one entry per line[L131]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917fcb0>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs -v to print numbered stack, one entry per line', script='set -o errexit\ncd /\npushd /tmp >/dev/nu...', value='--\n0\t/tmp\n1\t/\n--\n0\t/dev\n1\t/tmp\n2\t/\n', shells=['zsh'], variant='OK')], line_number=131, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs -v to print numbered stack, one entry per line (line 131)
E           
E           stdout mismatch:
E             expected: '--\n 0  ~\n 1  /\n--\n 0  /dev\n 1  ~\n 2  /'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '--\n 0  ~\n 1  /\n--\n 0  /dev\n 1  ~\n 2  /'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           cd /
E           pushd /tmp >/dev/null
E           echo --
E           dirs -v
E           pushd /dev >/dev/null
E           echo --
E           dirs -v
E           #
E           #  zsh uses tabs
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs -p to print one entry per line[L154]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917fd70>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs -p to print one entry per line', script='set -o errexit\ncd /\npushd /tmp >/dev/null\necho --\ndir...ns=[Assertion(type='stdout', value='--\n~\n/\n--\n/dev\n~\n/', shells=None, variant=None)], line_number=154, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs -p to print one entry per line (line 154)
E           
E           stdout mismatch:
E             expected: '--\n~\n/\n--\n/dev\n~\n/'
E             actual:   ''
E           
E           Expected stdout: '--\n~\n/\n--\n/dev\n~\n/'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           cd /
E           pushd /tmp >/dev/null
E           echo --
E           dirs -p
E           pushd /dev >/dev/null
E           echo --
E           dirs -p
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs -l to print in long format, no tilde prefix[L173]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917fe30>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs -l to print in long format, no tilde prefix', script="# Can't use the OSH test harness for this be...ssertion(type='stdout', value='~/oil_test /\n/tmp/oil_test /', shells=None, variant=None)], line_number=173, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs -l to print in long format, no tilde prefix (line 173)
E           
E           stdout mismatch:
E             expected: '~/oil_test /\n/tmp/oil_test /'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '~/oil_test /\n/tmp/oil_test /'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: dirs: command not found\nbash: dirs: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           # Can't use the OSH test harness for this because
E           # /home/<username> may be included in a path.
E           cd /
E           HOME=/tmp
E           mkdir -p $HOME/oil_test
E           pushd $HOME/oil_test >/dev/null
E           dirs
E           dirs -l
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs to print using tilde-prefix format[L188]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917fef0>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs to print using tilde-prefix format', script='cd /\nHOME=/tmp\nmkdir -p $HOME/oil_test\npushd $HOME... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=188, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs to print using tilde-prefix format (line 188)
E           
E           stdout mismatch:
E             expected: '~/oil_test /'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '~/oil_test /'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: dirs: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           cd /
E           HOME=/tmp
E           mkdir -p $HOME/oil_test
E           pushd $HOME/oil_test >/dev/null
E           dirs
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs test converting true home directory to tilde[L197]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10917ffb0>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs test converting true home directory to tilde', script='cd /\nHOME=/tmp\nmkdir -p $HOME/oil_test/$H... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=197, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs test converting true home directory to tilde (line 197)
E           
E           stdout mismatch:
E             expected: '~/oil_test/tmp /'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '~/oil_test/tmp /'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: dirs: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           cd /
E           HOME=/tmp
E           mkdir -p $HOME/oil_test/$HOME
E           pushd $HOME/oil_test/$HOME >/dev/null
E           dirs
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs tilde test when $HOME is exactly $PWD[L214]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a4170>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs tilde test when $HOME is exactly $PWD', script="cd /\nmkdir -p /tmp/oil_test\nHOME=/tmp/oil_test\n...h'], variant='OK'), Assertion(type='stdout', value='~ /\n~ /', shells=None, variant=None)], line_number=214, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs tilde test when $HOME is exactly $PWD (line 214)
E           
E           stdout mismatch:
E             expected: '~ /\n~ /'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '~ /\n~ /'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: dirs: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           cd /
E           mkdir -p /tmp/oil_test
E           HOME=/tmp/oil_test
E           pushd $HOME
E           dirs
E           # zsh doesn't duplicate the stack I guess.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs test of path alias `..`[L230]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a4230>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs test of path alias `..`', script='cd /tmp\npushd .. >/dev/null\ndirs', assertions=[Assertion(type=... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=230, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs test of path alias `..` (line 230)
E           
E           stdout mismatch:
E             expected: '/ ~'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '/ ~'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: dirs: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           cd /tmp
E           pushd .. >/dev/null
E           dirs
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-dirs.test.sh::dirs test of path alias `.`[L237]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a42f0>
test_file = 'builtin-dirs.test.sh'
test_case = TestCase(name='dirs test of path alias `.`', script='cd /tmp\npushd . >/dev/null\ndirs', assertions=[Assertion(type='s... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=237, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: dirs test of path alias `.` (line 237)
E           
E           stdout mismatch:
E             expected: '~ ~'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '~ ~'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: dirs: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           cd /tmp
E           pushd . >/dev/null
E           dirs
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::echo -e with 4 digit unicode escape[L175]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a4ef0>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='echo -e with 4 digit unicode escape', script="flags='-e'\ncase $SH in dash) flags='' ;; esac\n\necho $f...e), Assertion(type='stdout', value='abcd\\u0065f', shells=['dash', 'ash'], variant='N-I')], line_number=175, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: echo -e with 4 digit unicode escape (line 175)
E           
E           stdout mismatch:
E             expected: 'abcdef'
E             actual:   'abcd\\u0065f'
E           
E           Expected stdout: 'abcdef'
E           Actual stdout:   'abcd\\u0065f\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           flags='-e'
E           case $SH in dash) flags='' ;; esac
E           
E           echo $flags 'abcd\u0065f'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::echo -e with 8 digit unicode escape[L187]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a4fb0>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='echo -e with 8 digit unicode escape', script="flags='-e'\ncase $SH in dash) flags='' ;; esac\n\necho $f...Assertion(type='stdout', value='abcd\\U00000065f', shells=['dash', 'ash'], variant='N-I')], line_number=187, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: echo -e with 8 digit unicode escape (line 187)
E           
E           stdout mismatch:
E             expected: 'abcdef'
E             actual:   'abcd\\U00000065f'
E           
E           Expected stdout: 'abcdef'
E           Actual stdout:   'abcd\\U00000065f\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           flags='-e'
E           case $SH in dash) flags='' ;; esac
E           
E           echo $flags 'abcd\U00000065f'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::\\0377 is the highest octal byte[L199]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5070>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='\\0377 is the highest octal byte', script="echo -en '\\03777' | od -A n -t x1 | sed 's/ \\+/ /g'", asse..., Assertion(type='stdout', value=' 2d 65 6e 20 ff 37 0a', shells=['dash'], variant='N-I')], line_number=199, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \0377 is the highest octal byte (line 199)
E           
E           stdout mismatch:
E             expected: ' ff 37'
E             actual:   '  c3  bf  37'
E           
E           Expected stdout: ' ff 37'
E           Actual stdout:   '  c3  bf  37\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo -en '\03777' | od -A n -t x1 | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::\\0400 is one more than the highest octal byte[L208]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5130>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='\\0400 is one more than the highest octal byte', script="# It is 256 % 256 which gets interpreted as a ..., Assertion(type='stdout', value=' 2d 65 6e 20 00 30 0a', shells=['dash'], variant='N-I')], line_number=208, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \0400 is one more than the highest octal byte (line 208)
E           
E           stdout mismatch:
E             expected: ' 00 30'
E             actual:   '  c4  80  30'
E           
E           Expected stdout: ' 00 30'
E           Actual stdout:   '  c4  80  30\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # It is 256 % 256 which gets interpreted as a NUL byte.
E           echo -en '\04000' | od -A n -t x1 | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::\\0777 is out of range[L221]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a51f0>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='\\0777 is out of range', script="flags='-en'\ncase $SH in dash) flags='-n' ;; esac\n\necho $flags '\\07..., variant='BUG'), Assertion(type='stdout', value=' 3f 37', shells=['ash'], variant='BUG')], line_number=221, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \0777 is out of range (line 221)
E           
E           stdout mismatch:
E             expected: ' ff'
E             actual:   '  c7  bf'
E           
E           Expected stdout: ' ff'
E           Actual stdout:   '  c7  bf\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           flags='-en'
E           case $SH in dash) flags='-n' ;; esac
E           
E           echo $flags '\0777' | od -A n -t x1 | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::incomplete hex escape[L236]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a52b0>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='incomplete hex escape', script="echo -en 'abcd\\x6' | od -A n -c | sed 's/ \\+/ /g'", assertions=[Asser...sertion(type='stdout', value=' - e n a b c d \\ x 6 \\n', shells=['dash'], variant='N-I')], line_number=236, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: incomplete hex escape (line 236)
E           
E           stdout mismatch:
E             expected: ' a b c d 006'
E             actual:   '   a   b   c   d 006'
E           
E           Expected stdout: ' a b c d 006'
E           Actual stdout:   '   a   b   c   d 006\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo -en 'abcd\x6' | od -A n -c | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::\\x[L245]] _______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5370>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='\\x', script="# I consider mksh and zsh a bug because \\x is not an escape\necho -e '\\x' '\\xg' | od -..., Assertion(type='stdout', value=' \\0 \\0 g \\n', shells=['mksh', 'zsh'], variant='BUG')], line_number=245, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \x (line 245)
E           
E           stdout mismatch:
E             expected: ' \\ x \\ x g \\n'
E             actual:   '   \\   x       \\   x   g  \\n'
E           
E           Expected stdout: ' \\ x \\ x g \\n'
E           Actual stdout:   '   \\   x       \\   x   g  \\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # I consider mksh and zsh a bug because \x is not an escape
E           echo -e '\x' '\xg' | od -A n -c | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::incomplete octal escape[L258]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5430>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='incomplete octal escape', script="flags='-en'\ncase $SH in dash) flags='-n' ;; esac\n\necho $flags 'abc...'", assertions=[Assertion(type='stdout', value=' a b c d 004', shells=None, variant=None)], line_number=258, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: incomplete octal escape (line 258)
E           
E           stdout mismatch:
E             expected: ' a b c d 004'
E             actual:   '   a   b   c   d 004'
E           
E           Expected stdout: ' a b c d 004'
E           Actual stdout:   '   a   b   c   d 004\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           flags='-en'
E           case $SH in dash) flags='-n' ;; esac
E           
E           echo $flags 'abcd\04' | od -A n -c | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::incomplete unicode escape[L267]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a54f0>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='incomplete unicode escape', script="echo -en 'abcd\\u006' | od -A n -c | sed 's/ \\+/ /g'", assertions=...I'), Assertion(type='stdout', value=' a b c d \\ u 0 0 6', shells=['ash'], variant='BUG')], line_number=267, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: incomplete unicode escape (line 267)
E           
E           stdout mismatch:
E             expected: ' a b c d 006'
E             actual:   '   a   b   c   d   \\   u   0   0   6'
E           
E           Expected stdout: ' a b c d 006'
E           Actual stdout:   '   a   b   c   d   \\   u   0   0   6\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo -en 'abcd\u006' | od -A n -c | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::\\u6[L279]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a55b0>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='\\u6', script="flags='-en'\ncase $SH in dash) flags='-n' ;; esac\n\necho $flags '\\u6' | od -A n -c | s...t=None), Assertion(type='stdout', value=' \\ u 6', shells=['dash', 'ash'], variant='N-I')], line_number=279, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \u6 (line 279)
E           
E           stdout mismatch:
E             expected: ' 006'
E             actual:   '   \\   u   6'
E           
E           Expected stdout: ' 006'
E           Actual stdout:   '   \\   u   6\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           flags='-en'
E           case $SH in dash) flags='-n' ;; esac
E           
E           echo $flags '\u6' | od -A n -c | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::\\0 \\1 \\8[L291]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5670>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='\\0 \\1 \\8', script="# \\0 is special, but \\1 isn't in bash\n# \\1 is special in dash!  geez\nflags='...), Assertion(type='stdout', value=' \\0 001 \\ 8', shells=['dash', 'ash'], variant='BUG')], line_number=291, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \0 \1 \8 (line 291)
E           
E           stdout mismatch:
E             expected: ' \\0 \\ 1 \\ 8'
E             actual:   '  \\0       \\   1       \\   8'
E           
E           Expected stdout: ' \\0 \\ 1 \\ 8'
E           Actual stdout:   '  \\0       \\   1       \\   8\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # \0 is special, but \1 isn't in bash
E           # \1 is special in dash!  geez
E           flags='-en'
E           case $SH in dash) flags='-n' ;; esac
E           
E           echo $flags '\0' '\1' '\8' | od -A n -c | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-echo.test.sh::echo to redirected directory is an error[L306]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5730>
test_file = 'builtin-echo.test.sh'
test_case = TestCase(name='echo to redirected directory is an error', script='mkdir -p dir\n\necho foo > ./dir\necho status=$?\npr...one), Assertion(type='stdout', value='status=2\nstatus=2', shells=['dash'], variant='OK')], line_number=306, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: echo to redirected directory is an error (line 306)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p dir
E           
E           echo foo > ./dir
E           echo status=$?
E           printf foo > ./dir
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::eval accepts/ignores --[L9]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a58b0>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name='eval accepts/ignores --', script='eval -- echo hi', assertions=[Assertion(type='stdout', value='hi', sh...'], variant='BUG'), Assertion(type='stdout-json', value='', shells=['dash'], variant='BUG')], line_number=9, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: eval accepts/ignores -- (line 9)
E           
E           stdout mismatch:
E             expected: 'hi'
E             actual:   ''
E           
E           Expected stdout: 'hi'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: --: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           eval -- echo hi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::eval usage[L17]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5970>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name='eval usage', script='eval -\necho $?\neval -z\necho $?\n127\n0\n127', assertions=[Assertion(type='stdou..., variant=None), Assertion(type='stdout', value='127\n127', shells=['dash'], variant='OK')], line_number=17, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: eval usage (line 17)
E           
E           stdout mismatch:
E             expected: '127\n2'
E             actual:   '1\n1'
E           
E           Expected stdout: '127\n2'
E           Actual stdout:   '1\n1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: -: command not found\nbash: -z: command not found\nbash: 127: command not found\nbash: 0: command not found\nbash: 127: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           eval -
E           echo $?
E           eval -z
E           echo $?
E           127
E           0
E           127
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::eval string with 'break continue return error'[L39]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5a30>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name="eval string with 'break continue return error'", script='set -e\n\nsh_func_that_evals() {\n  local code...-- continue\n1\n2\nend func\n--- return\n1\n--- false\n1', shells=['mksh'], variant='BUG')], line_number=39, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: eval string with 'break continue return error' (line 39)
E           
E           stdout mismatch:
E             expected: '--- break\n1\nend func\n--- continue\n1\n2\nend func\n--- return\n1\n--- false\n1'
E             actual:   ''
E           
E           Expected stdout: '--- break\n1\nend func\n--- continue\n1\n2\nend func\n--- return\n1\n--- false\n1'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: eval: break 1\n'
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           set -e
E           
E           sh_func_that_evals() {
E             local code_str=$1
E             for i in 1 2; do
E               echo $i
E               eval "$code_str"
E             done
E             echo 'end func'
E           }
E           
E           for code_str in break continue return false; do
E             echo "--- $code_str"
E             sh_func_that_evals "$code_str"
E           done
E           echo status=$?
E           
E           
E           
E           # #### eval YSH block with 'break continue return error'
E           # case $SH in dash|bash*|mksh|zsh) exit ;; esac
E           #
E           # shopt -s ysh:all
E           #
E           # proc proc_that_evals(; ; ;b) {
E           #   for i in 1 2; do
E           #     echo $i
E           #     call io->eval(b)
E           #   done
E           #   echo 'end func'
E           # }
E           #
E           # var cases = [
E           #   ['break', ^(break)],
E           #   ['continue', ^(continue)],
E           #   ['return', ^(return)],
E           #   ['false', ^(false)],
E           # ]
E           #
E           # for test_case in (cases) {
E           #   var code_str, block = test_case
E           #   echo "--- $code_str"
E           #   proc_that_evals (; ; block)
E           # }
E           # echo status=$?
E           #
E           # ## status: 1
E           # ## STDOUT:
E           # --- break
E           # 1
E           # end func
E           # --- continue
E           # 1
E           # 2
E           # end func
E           # --- return
E           # 1
E           # --- false
E           # 1
E           # ## END
E           #
E           # ## N-I dash/bash/mksh/zsh status: 0
E           # ## N-I dash/bash/mksh/zsh STDOUT:
E           # ## END
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::exit within eval (regression)[L135]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5af0>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name='exit within eval (regression)', script="eval 'exit 42'\necho 'should not get here'", assertions=[Assert...shells=None, variant=None), Assertion(type='status', value=42, shells=None, variant=None)], line_number=135, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: exit within eval (regression) (line 135)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'should not get here'
E           status mismatch: expected 42, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'should not get here\n'
E           Expected stderr: None
E           Actual stderr:   'bash: eval: exit 42\n'
E           Expected status: 42
E           Actual status:   0
E           
E           Script:
E           ---
E           eval 'exit 42'
E           echo 'should not get here'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::source accepts/ignores --[L156]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a5d30>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name='source accepts/ignores --', script="echo 'echo foo' > $TMP/foo.sh\nsource -- $TMP/foo.sh", assertions=[...sh'], variant='N-I'), Assertion(type='status', value=127, shells=['dash'], variant='N-I')], line_number=156, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: source accepts/ignores -- (line 156)
E           
E           stdout mismatch:
E             expected: 'foo'
E             actual:   ''
E           
E           Expected stdout: 'foo'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: source: --: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           echo 'echo foo' > $TMP/foo.sh
E           source -- $TMP/foo.sh
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::Source with syntax error[L222]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a60f0>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name='Source with syntax error', script="# TODO: We should probably use dash behavior of a fatal error.\n# Al...['dash'], variant='OK'), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=222, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Source with syntax error (line 222)
E           
E           Execution error: Expected target for redirection > at line 1, column 7
E           
E           
E           Script:
E           ---
E           # TODO: We should probably use dash behavior of a fatal error.
E           # Although set-o errexit handles this.  We don't want to break the invariant
E           # that a builtin like 'source' behaves like an external program.  An external
E           # program can't halt the shell!
E           echo 'echo >' > $TMP/syntax-error.sh
E           . $TMP/syntax-error.sh
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::source looks in PATH for files[L323]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a64b0>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name='source looks in PATH for files', script='mkdir -p dir\necho "echo hi" > dir/cmd\nPATH="dir:$PATH"\n. cmd\nrm dir/cmd', assertions=[Assertion(type='stdout', value='hi', shells=None, variant=None)], line_number=323, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: source looks in PATH for files (line 323)
E           
E           stdout mismatch:
E             expected: 'hi'
E             actual:   ''
E           
E           Expected stdout: 'hi'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: source: cmd: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p dir
E           echo "echo hi" > dir/cmd
E           PATH="dir:$PATH"
E           . cmd
E           rm dir/cmd
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::source finds files in PATH before current dir[L333]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a6570>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name='source finds files in PATH before current dir', script='cd $TMP\nmkdir -p dir\necho "echo path" > dir/c..., assertions=[Assertion(type='stdout', value='path\nstatus=0', shells=None, variant=None)], line_number=333, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: source finds files in PATH before current dir (line 333)
E           
E           stdout mismatch:
E             expected: 'path\nstatus=0'
E             actual:   'current dir\nstatus=0'
E           
E           Expected stdout: 'path\nstatus=0'
E           Actual stdout:   'current dir\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd $TMP
E           mkdir -p dir
E           echo "echo path" > dir/cmd
E           echo "echo current dir" > cmd
E           PATH="dir:$PATH"
E           . cmd
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-eval-source.test.sh::sourcing along PATH should ignore directories[L363]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a67b0>
test_file = 'builtin-eval-source.test.sh'
test_case = TestCase(name='sourcing along PATH should ignore directories', script='mkdir -p _tmp/shell\nmkdir -p _tmp/dir/hello.sh..., Assertion(type='stdout', value='hi\nstatus=0\nstatus=0', shells=['mksh'], variant='OK')], line_number=363, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: sourcing along PATH should ignore directories (line 363)
E           
E           stdout mismatch:
E             expected: 'hi\nstatus=0\nhi\nstatus=0'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'hi\nstatus=0\nhi\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: source: hello.sh: No such file or directory\nbash: source: hello.sh: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p _tmp/shell
E           mkdir -p _tmp/dir/hello.sh
E           printf 'echo hi' >_tmp/shell/hello.sh
E           
E           DIR=$PWD/_tmp/dir
E           SHELL=$PWD/_tmp/shell
E           
E           # Should find the file hello.sh right away and source it
E           PATH="$SHELL:$PATH" . hello.sh
E           echo status=$?
E           
E           # Should fail because hello.sh cannot be found
E           PATH="$DIR:$SHELL:$PATH" . hello.sh
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::getopts sees unknown arg[L10]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a6930>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='getopts sees unknown arg', script='set -- -Z\ngetopts \'a:\' opt\necho "status=$? opt=$opt OPTARG=$OPTA...tions=[Assertion(type='stdout', value='status=0 opt=? OPTARG=', shells=None, variant=None)], line_number=10, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: getopts sees unknown arg (line 10)
E           
E           stdout mismatch:
E             expected: 'status=0 opt=? OPTARG='
E             actual:   'status=0 opt=? OPTARG=Z'
E           
E           Expected stdout: 'status=0 opt=? OPTARG='
E           Actual stdout:   'status=0 opt=? OPTARG=Z\n'
E           Expected stderr: None
E           Actual stderr:   'bash: getopts: illegal option -- Z\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- -Z
E           getopts 'a:' opt
E           echo "status=$? opt=$opt OPTARG=$OPTARG"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::getopts with invalid variable name[L74]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a6cf0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='getopts with invalid variable name', script="set -- -c foo -h\ngetopts 'hc:' opt-\necho status=$? opt=$...tion(type='stdout', value='status=1 opt= OPTARG= OPTIND=1', shells=['mksh'], variant='OK')], line_number=74, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: getopts with invalid variable name (line 74)
E           
E           stdout mismatch:
E             expected: 'status=1 opt= OPTARG=foo OPTIND=3'
E             actual:   'status=0 opt= OPTARG=foo OPTIND=3'
E           
E           Expected stdout: 'status=1 opt= OPTARG=foo OPTIND=3'
E           Actual stdout:   'status=0 opt= OPTARG=foo OPTIND=3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- -c foo -h
E           getopts 'hc:' opt-
E           echo status=$? opt=$opt OPTARG=$OPTARG OPTIND=$OPTIND
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::getopts with invalid flag[L82]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a6db0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='getopts with invalid flag', script='set -- -h -x\nwhile getopts "hc:" opt; do\n  case $opt in\n    h) F..., shells=None, variant=None), Assertion(type='status', value=2, shells=None, variant=None)], line_number=82, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: getopts with invalid flag (line 82)
E           
E           stdout mismatch:
E             expected: 'ERROR 3'
E             actual:   ''
E           
E           Expected stdout: 'ERROR 3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: getopts: illegal option -- x\n'
E           Expected status: 2
E           Actual status:   2
E           
E           Script:
E           ---
E           set -- -h -x
E           while getopts "hc:" opt; do
E             case $opt in
E               h) FLAG_h=1 ;;
E               c) FLAG_c="$OPTARG" ;;
E               '?') echo ERROR $OPTIND; exit 2; ;;
E             esac
E           done
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::getopts missing required argument[L111]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a6f30>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='getopts missing required argument', script='set -- -h -c\nwhile getopts "hc:" opt; do\n  case $opt in\n... shells=None, variant=None), Assertion(type='status', value=2, shells=None, variant=None)], line_number=111, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: getopts missing required argument (line 111)
E           
E           stdout mismatch:
E             expected: 'ERROR 3'
E             actual:   ''
E           
E           Expected stdout: 'ERROR 3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: getopts: option requires an argument -- c\n'
E           Expected status: 2
E           Actual status:   2
E           
E           Script:
E           ---
E           set -- -h -c
E           while getopts "hc:" opt; do
E             case $opt in
E               h) FLAG_h=1 ;;
E               c) FLAG_c="$OPTARG" ;;
E               '?') echo ERROR $OPTIND; exit 2; ;;
E             esac
E           done
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::getopts doesn't look for flags after args[L124]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a6ff0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name="getopts doesn't look for flags after args", script='set -- x -h -c y\nFLAG_h=0\nFLAG_c=\'\'\nwhile geto...sertion(type='stdout', value='h=0 c= optind=1 argv=x -h -c y', shells=None, variant=None)], line_number=124, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: getopts doesn't look for flags after args (line 124)
E           
E           stdout mismatch:
E             expected: 'h=0 c= optind=1 argv=x -h -c y'
E             actual:   'h=0 c= optind= argv=x -h -c y'
E           
E           Expected stdout: 'h=0 c= optind=1 argv=x -h -c y'
E           Actual stdout:   'h=0 c= optind= argv=x -h -c y\n'
E           Expected stderr: None
E           Actual stderr:   'bash: shift: -1: shift count out of range\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- x -h -c y
E           FLAG_h=0
E           FLAG_c=''
E           while getopts "hc:" opt; do
E             case $opt in
E               h) FLAG_h=1 ;;
E               c) FLAG_c="$OPTARG" ;;
E             esac
E           done
E           shift $(( OPTIND - 1 ))
E           echo h=$FLAG_h c=$FLAG_c optind=$OPTIND argv=$@
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::OPTIND[L155]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a7170>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='OPTIND', script='echo $OPTIND', assertions=[Assertion(type='stdout', value='1', shells=None, variant=None)], line_number=155, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: OPTIND (line 155)
E           
E           stdout mismatch:
E             expected: '1'
E             actual:   ''
E           
E           Expected stdout: '1'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $OPTIND
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::OPTIND after multiple getopts with same spec[L159]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a7230>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='OPTIND after multiple getopts with same spec', script='while getopts "hc:" opt; do\n  echo \'-\'\ndone\...ype='stdout', value='OPTIND=1\n-\n-\nOPTIND=4\nOPTIND=4', shells=['mksh'], variant='BUG')], line_number=159, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: OPTIND after multiple getopts with same spec (line 159)
E           
E           stdout mismatch:
E             expected: 'OPTIND=1\n-\n-\nOPTIND=4\nOPTIND=1'
E             actual:   'OPTIND=\n-\n-\nOPTIND=4\nOPTIND=4'
E           
E           Expected stdout: 'OPTIND=1\n-\n-\nOPTIND=4\nOPTIND=1'
E           Actual stdout:   'OPTIND=\n-\n-\nOPTIND=4\nOPTIND=4\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           while getopts "hc:" opt; do
E             echo '-'
E           done
E           echo OPTIND=$OPTIND
E           
E           set -- -h -c foo x y z
E           while getopts "hc:" opt; do
E             echo '-'
E           done
E           echo OPTIND=$OPTIND
E           
E           set --
E           while getopts "hc:" opt; do
E             echo '-'
E           done
E           echo OPTIND=$OPTIND
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::OPTIND after multiple getopts with different spec[L192]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a72f0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='OPTIND after multiple getopts with different spec', script='# Wow this is poorly specified!  A fundamen...='stdout', value='.\nOPTIND=2\n-\n-\nOPTIND=5\nOPTIND=5', shells=['mksh'], variant='BUG')], line_number=192, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: OPTIND after multiple getopts with different spec (line 192)
E           
E           stdout mismatch:
E             expected: '.\nOPTIND=2\n-\n-\nOPTIND=5\nOPTIND=2'
E             actual:   '.\nOPTIND=2\n-\n-\nOPTIND=5\nOPTIND=5'
E           
E           Expected stdout: '.\nOPTIND=2\n-\n-\nOPTIND=5\nOPTIND=2'
E           Actual stdout:   '.\nOPTIND=2\n-\n-\nOPTIND=5\nOPTIND=5\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Wow this is poorly specified!  A fundamental design problem with the global
E           # variable OPTIND.
E           set -- -a
E           while getopts "ab:" opt; do
E             echo '.'
E           done
E           echo OPTIND=$OPTIND
E           
E           set -- -c -d -e foo
E           while getopts "cde:" opt; do
E             echo '-'
E           done
E           echo OPTIND=$OPTIND
E           
E           set -- -f
E           while getopts "f:" opt; do
E             echo '_'
E           done
E           echo OPTIND=$OPTIND
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::two flags: -ab[L304]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a75f0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='two flags: -ab', script='getopts "ab" opt -ab\necho OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG\ngetopts "ab...D=2 opt=a OPTARG=\nOPTIND=2 opt=b OPTARG=', shells=['dash', 'mksh', 'ash'], variant='OK')], line_number=304, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: two flags: -ab (line 304)
E           
E           stdout mismatch:
E             expected: 'OPTIND=1 opt=a OPTARG=\nOPTIND=2 opt=b OPTARG='
E             actual:   'OPTIND= opt=a OPTARG=\nOPTIND=2 opt=b OPTARG='
E           
E           Expected stdout: 'OPTIND=1 opt=a OPTARG=\nOPTIND=2 opt=b OPTARG='
E           Actual stdout:   'OPTIND= opt=a OPTARG=\nOPTIND=2 opt=b OPTARG=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           getopts "ab" opt -ab
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           getopts "ab" opt -ab
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::flag and arg: -c10[L318]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a76b0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='flag and arg: -c10', script='getopts "c:" opt -c10\necho OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG\ngetopt...lue='OPTIND=2 opt=c OPTARG=10\nOPTIND=2 opt=? OPTARG=10', shells=['dash'], variant='BUG')], line_number=318, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: flag and arg: -c10 (line 318)
E           
E           stdout mismatch:
E             expected: 'OPTIND=2 opt=c OPTARG=10\nOPTIND=2 opt=? OPTARG='
E             actual:   'OPTIND=2 opt=c OPTARG=10\nOPTIND=2 opt=? OPTARG=10'
E           
E           Expected stdout: 'OPTIND=2 opt=c OPTARG=10\nOPTIND=2 opt=? OPTARG='
E           Actual stdout:   'OPTIND=2 opt=c OPTARG=10\nOPTIND=2 opt=? OPTARG=10\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           getopts "c:" opt -c10
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           getopts "c:" opt -c10
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::More Smooshing 1[L332]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a7770>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='More Smooshing 1', script='getopts "ab:c:" opt -ab hi -c hello\necho OPTIND=$OPTIND opt=$opt OPTARG=$OP...=b OPTARG=hi\nOPTIND=5 opt=c OPTARG=hello', shells=['dash', 'mksh', 'ash'], variant='OK')], line_number=332, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: More Smooshing 1 (line 332)
E           
E           stdout mismatch:
E             expected: 'OPTIND=1 opt=a OPTARG=\nOPTIND=3 opt=b OPTARG=hi\nOPTIND=5 opt=c OPTARG=hello'
E             actual:   'OPTIND= opt=a OPTARG=\nOPTIND=3 opt=b OPTARG=hi\nOPTIND=5 opt=c OPTARG=hello'
E           
E           Expected stdout: 'OPTIND=1 opt=a OPTARG=\nOPTIND=3 opt=b OPTARG=hi\nOPTIND=5 opt=c OPTARG=hello'
E           Actual stdout:   'OPTIND= opt=a OPTARG=\nOPTIND=3 opt=b OPTARG=hi\nOPTIND=5 opt=c OPTARG=hello\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           getopts "ab:c:" opt -ab hi -c hello
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           getopts "ab:c:" opt -ab hi -c hello
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           getopts "ab:c:" opt -ab hi -c hello
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::More Smooshing 2[L350]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a7830>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='More Smooshing 2', script='getopts "abc:" opt -abc10\necho OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG\ngeto...2 opt=b OPTARG=\nOPTIND=2 opt=c OPTARG=10', shells=['dash', 'mksh', 'ash'], variant='OK')], line_number=350, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: More Smooshing 2 (line 350)
E           
E           stdout mismatch:
E             expected: 'OPTIND=1 opt=a OPTARG=\nOPTIND=1 opt=b OPTARG=\nOPTIND=2 opt=c OPTARG=10'
E             actual:   'OPTIND= opt=a OPTARG=\nOPTIND= opt=b OPTARG=\nOPTIND=2 opt=c OPTARG=10'
E           
E           Expected stdout: 'OPTIND=1 opt=a OPTARG=\nOPTIND=1 opt=b OPTARG=\nOPTIND=2 opt=c OPTARG=10'
E           Actual stdout:   'OPTIND= opt=a OPTARG=\nOPTIND= opt=b OPTARG=\nOPTIND=2 opt=c OPTARG=10\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           getopts "abc:" opt -abc10
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           getopts "abc:" opt -abc10
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           getopts "abc:" opt -abc10
E           echo OPTIND=$OPTIND opt=$opt OPTARG=$OPTARG
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::OPTIND should be >= 1 (regression)[L368]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a78f0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='OPTIND should be >= 1 (regression)', script='OPTIND=-1\ngetopts a: foo\necho status=$?\n\nOPTIND=0\nget...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['dash'], variant='OK')], line_number=368, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: OPTIND should be >= 1 (regression) (line 368)
E           
E           Execution error: list index out of range
E           
E           
E           Script:
E           ---
E           OPTIND=-1
E           getopts a: foo
E           echo status=$?
E           
E           OPTIND=0
E           getopts a: foo
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::getopts bug #1523[L384]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a79b0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='getopts bug #1523', script='# Create test script inline - getopts with abc: optspec\ncat > /tmp/getopts...lue='opt:a\nopt:b\nopt:c arg:def\nopt:a\nopt:b\nopt:c arg:de', shells=None, variant=None)], line_number=384, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: getopts bug #1523 (line 384)
E           
E           Execution error: Expected 'done' to close while loop at line 1, column 29
E           
E           
E           Script:
E           ---
E           # Create test script inline - getopts with abc: optspec
E           cat > /tmp/getopts-1523.sh <<'SCRIPT'
E           while getopts "abc:" opt; do
E             case $opt in
E               a|b) echo "opt:$opt" ;;
E               c) echo "opt:$opt arg:$OPTARG" ;;
E               '?') echo "err:$opt" ;;
E             esac
E           done
E           exit 1
E           SCRIPT
E           
E           $SH /tmp/getopts-1523.sh -abcdef -abcde
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::More regression for #1523[L409]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a7a70>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='More regression for #1523', script='# Uses /tmp/getopts-1523.sh from previous test\n$SH /tmp/getopts-15...ut', value='opt:a\nopt:b\nopt:c arg:def\nerr:?\nerr:?\nerr:?', shells=None, variant=None)], line_number=409, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: More regression for #1523 (line 409)
E           
E           stdout mismatch:
E             expected: 'opt:a\nopt:b\nopt:c arg:def\nerr:?\nerr:?\nerr:?'
E             actual:   ''
E           status mismatch: expected 1, got 127
E           
E           Expected stdout: 'opt:a\nopt:b\nopt:c arg:def\nerr:?\nerr:?\nerr:?'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: /tmp/getopts-1523.sh: No such file or directory\n'
E           Expected status: 1
E           Actual status:   127
E           
E           Script:
E           ---
E           # Uses /tmp/getopts-1523.sh from previous test
E           $SH /tmp/getopts-1523.sh -abcdef -xyz
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-getopts.test.sh::getopts normal mode - invalid option (compare with silent)[L444]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a7cb0>
test_file = 'builtin-getopts.test.sh'
test_case = TestCase(name='getopts normal mode - invalid option (compare with silent)', script='# Normal mode: OPTARG is empty, pr...one), Assertion(type='stdout', value='status=0 opt=? OPTARG=', shells=None, variant=None)], line_number=444, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: getopts normal mode - invalid option (compare with silent) (line 444)
E           
E           stdout mismatch:
E             expected: 'status=0 opt=? OPTARG='
E             actual:   'status=0 opt=? OPTARG=Z'
E           
E           Expected stdout: 'status=0 opt=? OPTARG='
E           Actual stdout:   'status=0 opt=? OPTARG=Z\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # Normal mode: OPTARG is empty, prints error message
E           set -- -Z
E           getopts 'a:' opt 2>/dev/null
E           echo "status=$? opt=$opt OPTARG=$OPTARG"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta-assign.test.sh::builtin declare a=(x y) is allowed[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091a7fb0>
test_file = 'builtin-meta-assign.test.sh'
test_case = TestCase(name='builtin declare a=(x y) is allowed', script="case $SH in dash|zsh|mksh|ash) exit ;; esac\n\n$SH -c 'dec...), Assertion(type='stdout', value='', shells=['dash', 'zsh', 'mksh', 'ash'], variant='N-I')], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: builtin declare a=(x y) is allowed (line 4)
E           
E           stdout mismatch:
E             expected: 'declare -a a=([0]="x" [1]="y")\nfail\nfail'
E             actual:   'declare -- a="(x y)"\ndeclare -- a="(x y)"\ndeclare -a a=([0]="x" [1]="y")'
E           
E           Expected stdout: 'declare -a a=([0]="x" [1]="y")\nfail\nfail'
E           Actual stdout:   'declare -- a="(x y)"\ndeclare -- a="(x y)"\ndeclare -a a=([0]="x" [1]="y")\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh|mksh|ash) exit ;; esac
E           
E           $SH -c 'declare a=(x y); declare -p a'
E           if test $? -ne 0; then
E             echo 'fail'
E           fi
E           
E           $SH -c 'builtin declare a=(x y); declare -p a'
E           if test $? -ne 0; then
E             echo 'fail'
E           fi
E           
E           $SH -c 'builtin declare -a a=(x y); declare -p a'
E           if test $? -ne 0; then
E             echo 'fail'
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta-assign.test.sh::export, builtin export[L89]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cc230>
test_file = 'builtin-meta-assign.test.sh'
test_case = TestCase(name='export, builtin export', script="x='a b'\n\nexport y=$x\necho $y\n\nbuiltin export z=$x\necho $z", asse...'], variant='N-I'), Assertion(type='stdout', value='a b\n', shells=['ash'], variant='N-I')], line_number=89, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: export, builtin export (line 89)
E           
E           stdout mismatch:
E             expected: 'a b\na'
E             actual:   'a\na'
E           
E           Expected stdout: 'a b\na'
E           Actual stdout:   'a\na\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='a b'
E           
E           export y=$x
E           echo $y
E           
E           builtin export z=$x
E           echo $z
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta-assign.test.sh::\\command readonly - similar issue[L160]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cc3b0>
test_file = 'builtin-meta-assign.test.sh'
test_case = TestCase(name='\\command readonly - similar issue', script="case $SH in zsh) exit ;; esac\n\n# \\command readonly is e...dash'], variant='BUG'), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=160, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \command readonly - similar issue (line 160)
E           
E           stdout mismatch:
E             expected: 'a b\na\na\na'
E             actual:   'a\na\na\na'
E           
E           Expected stdout: 'a b\na\na\na'
E           Actual stdout:   'a\na\na\na\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) exit ;; esac
E           
E           # \command readonly is equivalent to \builtin declare
E           # except dash implements it
E           
E           x='a b'
E           
E           readonly b=$x
E           echo $b
E           
E           command readonly c=$x
E           echo $c
E           
E           \command readonly d=$x
E           echo $d
E           
E           'command' readonly e=$x
E           echo $e
E           
E           # The issue here is that we have a heuristic in EvalWordSequence2:
E           # fs len(part_vals) == 1
E           
E           
E           
E           # note: later versions of dash are fixed
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[builtin-meta.test.sh::command -v[L3]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cc830>
test_file = 'builtin-meta.test.sh'
test_case = TestCase(name='command -v', script="myfunc() { echo x; }\ncommand -v echo\necho $?\n\ncommand -v myfunc\necho $?\n\nco...o\n0\nmyfunc\n0\nnonexistent=127\nempty=127\nfor\n0', shells=['dash', 'ash'], variant='OK')], line_number=3, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command -v (line 3)
E           
E           stdout mismatch:
E             expected: 'echo\n0\nmyfunc\n0\nnonexistent=1\nempty=1\nfor\n0'
E             actual:   'echo\n0\nmyfunc\n0\nnonexistent=1\nempty=1\n1'
E           
E           Expected stdout: 'echo\n0\nmyfunc\n0\nnonexistent=1\nempty=1\nfor\n0'
E           Actual stdout:   'echo\n0\nmyfunc\n0\nnonexistent=1\nempty=1\n1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: command: nonexistent: not found\nbash: command: : not found\nbash: command: for: not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           myfunc() { echo x; }
E           command -v echo
E           echo $?
E           
E           command -v myfunc
E           echo $?
E           
E           command -v nonexistent  # doesn't print anything
E           echo nonexistent=$?
E           
E           command -v ''  # BUG FIX, shouldn't succeed
E           echo empty=$?
E           
E           command -v for
E           echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta.test.sh::command -v executable, builtin[L40]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cc8f0>
test_file = 'builtin-meta.test.sh'
test_case = TestCase(name='command -v executable, builtin', script="#command -v grep ls\n\ncommand -v grep | egrep -o '/[^/]+$'\nc...ons=[Assertion(type='stdout', value='/grep\n/ls\n\ntrue\neval', shells=None, variant=None)], line_number=40, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command -v executable, builtin (line 40)
E           
E           stdout mismatch:
E             expected: '/grep\n/ls\n\ntrue\neval'
E             actual:   '\ntrue\neval'
E           
E           Expected stdout: '/grep\n/ls\n\ntrue\neval'
E           Actual stdout:   '\ntrue\neval\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           #command -v grep ls
E           
E           command -v grep | egrep -o '/[^/]+$'
E           command -v ls | egrep -o '/[^/]+$'
E           echo
E           
E           command -v true
E           command -v eval
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta.test.sh::command -v with multiple names[L60]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cc9b0>
test_file = 'builtin-meta.test.sh'
test_case = TestCase(name='command -v with multiple names', script='# ALL FOUR SHELLS behave differently here!\n#\n# bash chooses ...), Assertion(type='stdout', value='echo\nmyfunc\nstatus=1', shells=['mksh'], variant='OK')], line_number=60, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command -v with multiple names (line 60)
E           
E           stdout mismatch:
E             expected: 'echo\nmyfunc\nfor\nstatus=0'
E             actual:   'echo\nstatus=0'
E           
E           Expected stdout: 'echo\nmyfunc\nfor\nstatus=0'
E           Actual stdout:   'echo\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # ALL FOUR SHELLS behave differently here!
E           #
E           # bash chooses to swallow the error!  We agree with zsh if ANY word lookup
E           # fails, then the whole thing fails.
E           
E           myfunc() { echo x; }
E           command -v echo myfunc ZZZ for
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta.test.sh::command -v doesn't find non-executable file[L89]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cca70>
test_file = 'builtin-meta.test.sh'
test_case = TestCase(name="command -v doesn't find non-executable file", script='# PATH resolution is different\n\nmkdir -p _tmp\n...n-executable\nstatus=0\n_tmp/executable\nstatus=0', shells=['dash', 'ash'], variant='BUG')], line_number=89, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command -v doesn't find non-executable file (line 89)
E           
E           stdout mismatch:
E             expected: 'status=1\n_tmp/executable\nstatus=0'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=1\n_tmp/executable\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: command: _tmp/non-executable: not found\nbash: command: _tmp/executable: not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # PATH resolution is different
E           
E           mkdir -p _tmp
E           PATH="_tmp:$PATH"
E           touch _tmp/non-executable _tmp/executable
E           chmod +x _tmp/executable
E           
E           command -v _tmp/non-executable
E           echo status=$?
E           
E           command -v _tmp/executable
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta.test.sh::command -v doesn't find executable dir[L116]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ccb30>
test_file = 'builtin-meta.test.sh'
test_case = TestCase(name="command -v doesn't find executable dir", script='mkdir -p _tmp\nPATH="_tmp:$PATH"\nmkdir _tmp/cat\n\nco...alue='_tmp/cat\nstatus=0\n/usr/bin/cat\nstatus=0', shells=['ash', 'dash'], variant='BUG')], line_number=116, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command -v doesn't find executable dir (line 116)
E           
E           stdout mismatch:
E             expected: 'status=1\n/usr/bin/cat\nstatus=0'
E             actual:   'status=1\ncat\nstatus=0'
E           
E           Expected stdout: 'status=1\n/usr/bin/cat\nstatus=0'
E           Actual stdout:   'status=1\ncat\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: command: _tmp/cat: not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           PATH="_tmp:$PATH"
E           mkdir _tmp/cat
E           
E           command -v _tmp/cat
E           echo status=$?
E           command -v cat
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[builtin-meta.test.sh::command -V[L146]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ccbf0>
test_file = 'builtin-meta.test.sh'
test_case = TestCase(name='command -V', script='myfunc() { echo x; }\n\nshopt -s expand_aliases\nalias ll=\'ls -l\'\n\nbacktick=\\...und\nstatus=127\nfor is a shell keyword\nstatus=0', shells=['dash', 'ash'], variant='OK')], line_number=146, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command -V (line 146)
E           
E           stdout mismatch:
E             expected: "ll is aliased to 'ls -l'\nstatus=0\necho is a shell builtin\nstatus=0\nmyfunc is a function\nmyfunc ()\n{\n    echo x\n}\nstatus=0\nstatus=1\nfor is a shell keyword\nstatus=0"
E             actual:   'status=0\necho is echo\nstatus=0\nmyfunc is a function\nstatus=0\nstatus=1\nstatus=1'
E           
E           Expected stdout: "ll is aliased to 'ls -l'\nstatus=0\necho is a shell builtin\nstatus=0\nmyfunc is a function\nmyfunc () \n{ \n    echo x\n}\nstatus=0\nstatus=1\nfor is a shell keyword\nstatus=0"
E           Actual stdout:   'status=0\necho is echo\nstatus=0\nmyfunc is a function\nstatus=0\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: command: nonexistent: not found\nbash: command: for: not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           myfunc() { echo x; }
E           
E           shopt -s expand_aliases
E           alias ll='ls -l'
E           
E           backtick=\`
E           command -V ll | sed "s/$backtick/'/g"
E           echo status=$?
E           
E           command -V echo
E           echo status=$?
E           
E           # Paper over insignificant difference
E           command -V myfunc | sed 's/shell function/function/'
E           echo status=$?
E           
E           command -V nonexistent  # doesn't print anything
E           echo status=$?
E           
E           command -V for
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta.test.sh::command -p (override existing program)[L296]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ccfb0>
test_file = 'builtin-meta.test.sh'
test_case = TestCase(name='command -p (override existing program)', script='# Tests whether command -p overrides the path\n# tr ch.../tr', assertions=[Assertion(type='stdout', value='wrong\nbbb', shells=None, variant=None)], line_number=296, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command -p (override existing program) (line 296)
E           
E           stdout mismatch:
E             expected: 'wrong\nbbb'
E             actual:   'bbb'
E           
E           Expected stdout: 'wrong\nbbb'
E           Actual stdout:   'bbb\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Tests whether command -p overrides the path
E           # tr chosen because we need a simple non-builtin
E           mkdir -p $TMP/bin
E           echo "echo wrong" > $TMP/bin/tr
E           chmod +x $TMP/bin/tr
E           PATH="$TMP/bin:$PATH"
E           echo aaa | tr "a" "b"
E           echo aaa | command -p tr "a" "b"
E           rm $TMP/bin/tr
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-meta.test.sh::builtin usage[L361]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cd430>
test_file = 'builtin-meta.test.sh'
test_case = TestCase(name='builtin usage', script='builtin\necho status=$?\n\nbuiltin --\necho status=$?\n\nbuiltin -- false\necho...dout', value='status=127\nstatus=127\nstatus=127', shells=['dash', 'ash'], variant='N-I')], line_number=361, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: builtin usage (line 361)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=0\nstatus=1'
E             actual:   'status=0\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=0\nstatus=1'
E           Actual stdout:   'status=0\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: builtin: --: not a shell builtin\nbash: builtin: --: not a shell builtin\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           builtin
E           echo status=$?
E           
E           builtin --
E           echo status=$?
E           
E           builtin -- false
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-misc.test.sh::history builtin usage[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cd5b0>
test_file = 'builtin-misc.test.sh'
test_case = TestCase(name='history builtin usage', script='history\necho status=$?\nhistory +5  # hm bash considers this valid\nec...tatus=127\nstatus=127\nstatus=127\nstatus=127\nstatus=127', shells=['dash'], variant='N-I')], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: history builtin usage (line 4)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=0\nstatus=2\nstatus=1\nstatus=1'
E             actual:   'status=0\nstatus=0\nstatus=0\nstatus=0\nstatus=0'
E           
E           Expected stdout: 'status=0\nstatus=0\nstatus=2\nstatus=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=0\nstatus=0\nstatus=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           history
E           echo status=$?
E           history +5  # hm bash considers this valid
E           echo status=$?
E           history -5  # invalid flag
E           echo status=$?
E           history f 
E           echo status=$?
E           history too many args
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-misc.test.sh::Print shell strings with weird chars: set and printf %q and ${x@Q}[L47]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cd670>
test_file = 'builtin-misc.test.sh'
test_case = TestCase(name='Print shell strings with weird chars: set and printf %q and ${x@Q}', script='# bash declare -p will pri...nt=None), Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh'], variant='OK')], line_number=47, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Print shell strings with weird chars: set and printf %q and ${x@Q} (line 47)
E           
E           stdout mismatch:
E             expected: "foo=$'a\\nb\\001c\\'d'\npf  $'a\\nb\\001c\\'d'\n@Q  $'a\\nb\\001c\\'d'"
E             actual:   "foo=''\npf  ''\n@Q  ''"
E           
E           Expected stdout: "foo=$'a\\nb\\001c\\'d'\npf  $'a\\nb\\001c\\'d'\n@Q  $'a\\nb\\001c\\'d'"
E           Actual stdout:   "foo=''\npf  ''\n@Q  ''\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash declare -p will print binary data, which makes this invalid UTF-8!
E           foo=$(/bin/echo -e 'a\nb\xffc'\'d)
E           
E           # let's test the easier \x01, which doesn't give bash problems
E           foo=$(/bin/echo -e 'a\nb\x01c'\'d)
E           
E           # dash:
E           #   only supports 'set'; prints it on multiple lines with binary data
E           #   switches to "'" for single quotes, not \'
E           # zsh:
E           #   print binary data all the time, except for printf %q
E           #   does print $'' strings
E           # mksh:
E           #   prints binary data for @Q
E           #   prints $'' strings
E           
E           # All are very inconsistent.
E           
E           case $SH in dash|mksh|zsh) return ;; esac
E           
E           
E           set | grep '^foo='
E           
E           # Will print multi-line and binary data literally!
E           #declare -p foo
E           
E           printf 'pf  %q\n' "$foo"
E           
E           echo '@Q ' ${foo@Q}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-misc.test.sh::Print shell strings with normal chars: set and printf %q and ${x@Q}[L88]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cd730>
test_file = 'builtin-misc.test.sh'
test_case = TestCase(name='Print shell strings with normal chars: set and printf %q and ${x@Q}', script='# There are variations on..., variant='OK'), Assertion(type='stdout', value='', shells=['dash', 'zsh'], variant='N-I')], line_number=88, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Print shell strings with normal chars: set and printf %q and ${x@Q} (line 88)
E           
E           stdout mismatch:
E             expected: 'foo=spam\ndeclare -- foo="spam"\npf  spam\n@Q  \'spam\''
E             actual:   'foo=\'spam\'\ndeclare -- foo="spam"\npf  spam\n@Q  \'spam\''
E           
E           Expected stdout: 'foo=spam\ndeclare -- foo="spam"\npf  spam\n@Q  \'spam\''
E           Actual stdout:   'foo=\'spam\'\ndeclare -- foo="spam"\npf  spam\n@Q  \'spam\'\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # There are variations on whether quotes are printed
E           
E           case $SH in dash|zsh) return ;; esac
E           
E           foo=spam
E           
E           set | grep '^foo='
E           
E           # Will print multi-line and binary data literally!
E           typeset -p foo
E           
E           printf 'pf  %q\n' "$foo"
E           
E           echo '@Q ' ${foo@Q}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf -v a[1][L60]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cdd30>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf -v a[1]', script='a=(a b c)\nprintf -v \'a[1]\' %s \'foo\'\necho status=$?\nargv.py "${a[@]}"', ..., variant='N-I'), Assertion(type='status', value=2, shells=['dash', 'ash'], variant='N-I')], line_number=60, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf -v a[1] (line 60)
E           
E           stdout mismatch:
E             expected: "status=0\n['a', 'foo', 'c']"
E             actual:   "status=0\n['a', 'b', 'c']"
E           
E           Expected stdout: "status=0\n['a', 'foo', 'c']"
E           Actual stdout:   "status=0\n['a', 'b', 'c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(a b c)
E           printf -v 'a[1]' %s 'foo'
E           echo status=$?
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf -v syntax error[L76]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cddf0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf -v syntax error', script="printf -v 'a[' %s 'foo'\necho status=$?", assertions=[Assertion(type='...Assertion(type='stdout', value='-vstatus=0', shells=['ash', 'mksh', 'zsh'], variant='N-I')], line_number=76, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf -v syntax error (line 76)
E           
E           stdout mismatch:
E             expected: 'status=2'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=2'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf -v 'a[' %s 'foo'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf -v dynamic scope[L115]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ce030>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf -v dynamic scope', script='case $SH in mksh|zsh|dash|ash) echo not implemented; exit ;; esac\n# ...='stdout', value='not implemented', shells=['dash', 'ash', 'mksh', 'zsh'], variant='N-I')], line_number=115, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf -v dynamic scope (line 115)
E           
E           stdout mismatch:
E             expected: 'dollar=dollar\n--\ndollar=\\$\nmylocal=mylocal\n--\ndollar=\\$\nmylocal='
E             actual:   "dollar=dollar\n--\ndollar=$'$'\nmylocal=mylocal\n--\ndollar=$'$'\nmylocal="
E           
E           Expected stdout: 'dollar=dollar\n--\ndollar=\\$\nmylocal=mylocal\n--\ndollar=\\$\nmylocal='
E           Actual stdout:   "dollar=dollar\n--\ndollar=$'$'\nmylocal=mylocal\n--\ndollar=$'$'\nmylocal=\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh|zsh|dash|ash) echo not implemented; exit ;; esac
E           # OK so printf is like assigning to a var.
E           # printf -v foo %q "$bar" is like
E           # foo=${bar@Q}
E           dollar='dollar'
E           f() {
E             local mylocal=foo
E             printf -v dollar %q '$'  # assign foo to a quoted dollar
E             printf -v mylocal %q 'mylocal'
E             echo dollar=$dollar
E             echo mylocal=$mylocal
E           }
E           echo dollar=$dollar
E           echo --
E           f
E           echo --
E           echo dollar=$dollar
E           echo mylocal=$mylocal
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf with too few arguments[L156]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ce0f0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf with too few arguments', script="printf -- '-%s-%s-%s-\\n' 'a b' 'x y'", assertions=[Assertion(type='stdout', value='-a b-x y--', shells=None, variant=None)], line_number=156, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf with too few arguments (line 156)
E           
E           stdout mismatch:
E             expected: '-a b-x y--'
E             actual:   '--'
E           
E           Expected stdout: '-a b-x y--'
E           Actual stdout:   '--'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf -- '-%s-%s-%s-\n' 'a b' 'x y'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf with too many arguments[L162]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ce1b0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf with too many arguments', script="printf -- '-%s-%s-\\n' a b c d e", assertions=[Assertion(type='stdout', value='-a-b-\n-c-d-\n-e--', shells=None, variant=None)], line_number=162, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf with too many arguments (line 162)
E           
E           stdout mismatch:
E             expected: '-a-b-\n-c-d-\n-e--'
E             actual:   '--'
E           
E           Expected stdout: '-a-b-\n-c-d-\n-e--'
E           Actual stdout:   '--'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf -- '-%s-%s-\n' a b c d e
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %6.4d -- "precision" does padding for integers[L198]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ce3f0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %6.4d -- "precision" does padding for integers', script="printf '[%6.4d]\\n' 42\nprintf '[%.4d]\...'[  0042]\n[0042]\n[    42]\n--\n[ -0042]\n[-0042]\n[   -42]', shells=None, variant=None)], line_number=198, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %6.4d -- "precision" does padding for integers (line 198)
E           
E           stdout mismatch:
E             expected: '[  0042]\n[0042]\n[    42]\n--\n[ -0042]\n[-0042]\n[   -42]'
E             actual:   '[  0042]\n[0042]\n[%6.d]\n--\n[ -0042]\n[-0042]\n[%6.d]'
E           
E           Expected stdout: '[  0042]\n[0042]\n[    42]\n--\n[ -0042]\n[-0042]\n[   -42]'
E           Actual stdout:   '[  0042]\n[0042]\n[%6.d]\n--\n[ -0042]\n[-0042]\n[%6.d]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '[%6.4d]\n' 42
E           printf '[%.4d]\n' 42
E           printf '[%6.d]\n' 42
E           echo --
E           printf '[%6.4d]\n' -42
E           printf '[%.4d]\n' -42
E           printf '[%6.d]\n' -42
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %6.4x X o[L216]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ce4b0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %6.4x X o', script="printf '[%6.4x]\\n' 42\nprintf '[%.4x]\\n' 42\nprintf '[%6.x]\\n' 42\necho -...\n[  002A]\n[002A]\n[    2A]\n--\n[  0052]\n[0052]\n[    52]', shells=None, variant=None)], line_number=216, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %6.4x X o (line 216)
E           
E           stdout mismatch:
E             expected: '[  002a]\n[002a]\n[    2a]\n--\n[  002A]\n[002A]\n[    2A]\n--\n[  0052]\n[0052]\n[    52]'
E             actual:   '[  002a]\n[002a]\n[%6.x]\n--\n[  002A]\n[002A]\n[%6.X]\n--\n[  0052]\n[0052]\n[%6.o]'
E           
E           Expected stdout: '[  002a]\n[002a]\n[    2a]\n--\n[  002A]\n[002A]\n[    2A]\n--\n[  0052]\n[0052]\n[    52]'
E           Actual stdout:   '[  002a]\n[002a]\n[%6.x]\n--\n[  002A]\n[002A]\n[%6.X]\n--\n[  0052]\n[0052]\n[%6.o]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '[%6.4x]\n' 42
E           printf '[%.4x]\n' 42
E           printf '[%6.x]\n' 42
E           echo --
E           printf '[%6.4X]\n' 42
E           printf '[%.4X]\n' 42
E           printf '[%6.X]\n' 42
E           echo --
E           printf '[%6.4o]\n' 42
E           printf '[%.4o]\n' 42
E           printf '[%6.o]\n' 42
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %6.s and %0.s[L312]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ce930>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %6.s and %0.s', script="printf '[%6.s]\\n' foo\nprintf '[%0.s]\\n' foo", assertions=[Assertion(t...mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=312, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %6.s and %0.s (line 312)
E           
E           stdout mismatch:
E             expected: '[      ]\n[]'
E             actual:   '[%6.s]\n[%0.s]'
E           
E           Expected stdout: '[      ]\n[]'
E           Actual stdout:   '[%6.s]\n[%0.s]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '[%6.s]\n' foo
E           printf '[%0.s]\n' foo
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::Invalid UTF-8[L501]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cee70>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='Invalid UTF-8', script='echo bytes1\nnot_utf8=$(python2 -c \'print("\\xce\\xce")\')\n\nprintf \'%x\\n\'...n274\n\noverlong2\nc1\n193\n301\n\noverlong3\ne0\n224\n340\n', shells=None, variant=None)], line_number=501, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Invalid UTF-8 (line 501)
E           
E           stdout mismatch:
E             expected: 'bytes1\nce\n206\n316\n\nbytes2\nbc\n188\n274\n\noverlong2\nc1\n193\n301\n\noverlong3\ne0\n224\n340'
E             actual:   'bytes1\n0\n0\n0\n\nbytes2\n0\n0\n0\n\noverlong2\n0\n0\n0\n\noverlong3\n0\n0\n0'
E           
E           Expected stdout: 'bytes1\nce\n206\n316\n\nbytes2\nbc\n188\n274\n\noverlong2\nc1\n193\n301\n\noverlong3\ne0\n224\n340\n'
E           Actual stdout:   'bytes1\n0\n0\n0\n\nbytes2\n0\n0\n0\n\noverlong2\n0\n0\n0\n\noverlong3\n0\n0\n0\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo bytes1
E           not_utf8=$(python2 -c 'print("\xce\xce")')
E           
E           printf '%x\n' \'$not_utf8
E           printf '%u\n' \'$not_utf8
E           printf '%o\n' \'$not_utf8
E           echo
E           
E           echo bytes2
E           not_utf8=$(python2 -c 'print("\xbc\xbc")')
E           printf '%x\n' \'$not_utf8
E           printf '%u\n' \'$not_utf8
E           printf '%o\n' \'$not_utf8
E           echo
E           
E           # Copied from data_lang/utf8_test.cc
E           
E           echo overlong2
E           overlong2=$(python2 -c 'print("\xC1\x81")')
E           printf '%x\n' \'$overlong2
E           printf '%u\n' \'$overlong2
E           printf '%o\n' \'$overlong2
E           echo
E           
E           echo overlong3
E           overlong3=$(python2 -c 'print("\xE0\x81\x81")')
E           printf '%x\n' \'$overlong3
E           printf '%u\n' \'$overlong3
E           printf '%o\n' \'$overlong3
E           echo
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::Too large[L559]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cef30>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='Too large', script='case $SH in mksh) echo \'weird bug\'; exit ;; esac\n\necho too large\ntoo_large=$(p...Assertion(type='stdout', value='too large\nf4\n244\n364\n', shells=['osh'], variant='OK')], line_number=559, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Too large (line 559)
E           
E           stdout mismatch:
E             expected: 'too large\n111111\n1118481\n4210421'
E             actual:   'too large\n0\n0\n0'
E           
E           Expected stdout: 'too large\n111111\n1118481\n4210421\n'
E           Actual stdout:   'too large\n0\n0\n0\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) echo 'weird bug'; exit ;; esac
E           
E           echo too large
E           too_large=$(python2 -c 'print("\xF4\x91\x84\x91")')
E           printf '%x\n' \'$too_large
E           printf '%u\n' \'$too_large
E           printf '%o\n' \'$too_large
E           echo
E           
E           
E           
E           
E           # osh rejects code points that are too large for a DIFFERENT reason
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::negative numbers with unsigned / octal / hex[L601]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091ceff0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='negative numbers with unsigned / octal / hex', script="printf '[%u]\\n' -42\necho status=$?\n\nprintf '...='stdout', value='status=1\nstatus=1\nstatus=1\nstatus=1', shells=['osh'], variant='N-I')], line_number=601, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: negative numbers with unsigned / octal / hex (line 601)
E           
E           stdout mismatch:
E             expected: '[18446744073709551574]\nstatus=0\n[1777777777777777777726]\nstatus=0\n[ffffffffffffffd6]\nstatus=0\n[FFFFFFFFFFFFFFD6]\nstatus=0'
E             actual:   '[-42]\nstatus=0\n[-52]\nstatus=0\n[-2a]\nstatus=0\n[-2A]\nstatus=0'
E           
E           Expected stdout: '[18446744073709551574]\nstatus=0\n[1777777777777777777726]\nstatus=0\n[ffffffffffffffd6]\nstatus=0\n[FFFFFFFFFFFFFFD6]\nstatus=0'
E           Actual stdout:   '[-42]\nstatus=0\n[-52]\nstatus=0\n[-2a]\nstatus=0\n[-2A]\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '[%u]\n' -42
E           echo status=$?
E           
E           printf '[%o]\n' -42
E           echo status=$?
E           
E           printf '[%x]\n' -42
E           echo status=$?
E           
E           printf '[%X]\n' -42
E           echo status=$?
E           
E           
E           # osh DISALLOWS this because the output depends on the machine architecture.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf backslash escapes[L698]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cf2f0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf backslash escapes', script='argv.py "$(printf \'a\\tb\')"\nargv.py "$(printf \'\\xE2\\x98\\xA0\'...'a\\tb']\n['\\\\xE2\\\\x98\\\\xA0']\n['$e']\n['\\x1f7']", shells=['dash'], variant='N-I')], line_number=698, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf backslash escapes (line 698)
E           
E           stdout mismatch:
E             expected: "['a\\tb']\n['\\xe2\\x98\\xa0']\n['$e']\n['\\x1f7']"
E             actual:   "['a\tb']\n['☠']\n['$e']\n['ÿ']"
E           
E           Expected stdout: "['a\\tb']\n['\\xe2\\x98\\xa0']\n['$e']\n['\\x1f7']"
E           Actual stdout:   "['a\tb']\n['☠']\n['$e']\n['ÿ']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py "$(printf 'a\tb')"
E           argv.py "$(printf '\xE2\x98\xA0')"
E           argv.py "$(printf '\044e')"
E           argv.py "$(printf '\0377')"  # out of range
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf octal backslash escapes[L716]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cf3b0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf octal backslash escapes', script='argv.py "$(printf \'\\0377\')"\nargv.py "$(printf \'\\377\')"'...tions=[Assertion(type='stdout', value="['\\x1f7']\n['\\xff']", shells=None, variant=None)], line_number=716, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf octal backslash escapes (line 716)
E           
E           stdout mismatch:
E             expected: "['\\x1f7']\n['\\xff']"
E             actual:   "['ÿ']\n['ÿ']"
E           
E           Expected stdout: "['\\x1f7']\n['\\xff']"
E           Actual stdout:   "['ÿ']\n['ÿ']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py "$(printf '\0377')"
E           argv.py "$(printf '\377')"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf unicode backslash escapes[L724]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cf470>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf unicode backslash escapes', script='argv.py "$(printf \'\\u2620\')"\nargv.py "$(printf \'\\U0000...stdout', value="['\\\\u2620']\n['\\\\U0000065f']", shells=['dash', 'ash'], variant='N-I')], line_number=724, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf unicode backslash escapes (line 724)
E           
E           stdout mismatch:
E             expected: "['\\xe2\\x98\\xa0']\n['\\xd9\\x9f']"
E             actual:   "['☠']\n['ٟ']"
E           
E           Expected stdout: "['\\xe2\\x98\\xa0']\n['\\xd9\\x9f']"
E           Actual stdout:   "['☠']\n['ٟ']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py "$(printf '\u2620')"
E           argv.py "$(printf '\U0000065f')"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf invalid backslash escape (is ignored)[L736]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cf530>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf invalid backslash escape (is ignored)', script="printf '[\\Z]\\n'", assertions=[Assertion(type='stdout', value='[\\Z]', shells=None, variant=None)], line_number=736, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf invalid backslash escape (is ignored) (line 736)
E           
E           stdout mismatch:
E             expected: '[\\Z]'
E             actual:   '[Z]'
E           
E           Expected stdout: '[\\Z]'
E           Actual stdout:   '[Z]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '[\Z]\n'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %c unicode - prints the first BYTE of a string - it does not respect UTF-8[L760]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cf770>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %c unicode - prints the first BYTE of a string - it does not respect UTF-8', script='# TODO: in ... variant=None), Assertion(type='stdout', value='', shells=['dash', 'ash'], variant='N-I')], line_number=760, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %c unicode - prints the first BYTE of a string - it does not respect UTF-8 (line 760)
E           
E           stdout mismatch:
E             expected: '[μμ]\n ce'
E             actual:   '[μμ]\n  ce  bc'
E           
E           Expected stdout: '[μμ]\n ce'
E           Actual stdout:   '[μμ]\n  ce  bc\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # TODO: in YSH, this should be deprecated
E           case $SH in dash|ash) exit ;; esac
E           
E           show_bytes() {
E             od -A n -t x1
E           }
E           twomu=$'\u03bc\u03bc'
E           printf '[%s]\n' "$twomu"
E           
E           # Hm this cuts off a UTF-8 character?
E           printf '%c' "$twomu" | show_bytes
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf invalid format[L781]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cf830>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf invalid format', script="printf '%z' 42\necho status=$?\nprintf '%-z' 42\necho status=$?\n# osh ...ssertion(type='stdout', value='status=2\nstatus=2', shells=['dash', 'osh'], variant='OK')], line_number=781, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf invalid format (line 781)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   '%zstatus=0\n%-zstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   '%zstatus=0\n%-zstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '%z' 42
E           echo status=$?
E           printf '%-z' 42
E           echo status=$?
E           # osh emits parse errors
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %q[L796]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cf8f0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %q', script='x=\'a b\'\nprintf \'[%q]\\n\' "$x"', assertions=[Assertion(type='stdout', value="['...'ash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=796, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %q (line 796)
E           
E           stdout mismatch:
E             expected: '[a\\ b]'
E             actual:   "[$'a b']"
E           
E           Expected stdout: '[a\\ b]'
E           Actual stdout:   "[$'a b']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='a b'
E           printf '[%q]\n' "$x"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %6q (width)[L809]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cf9b0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %6q (width)', script='# NOTE: coreutils /usr/bin/printf does NOT implement this %6q !!!\nx=\'a b...'ash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=809, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %6q (width) (line 809)
E           
E           stdout mismatch:
E             expected: '[  a\\ b]\n[a\\ b]'
E             actual:   "[$'a b']\n[$'a b']"
E           
E           Expected stdout: '[  a\\ b]\n[a\\ b]'
E           Actual stdout:   "[$'a b']\n[$'a b']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: coreutils /usr/bin/printf does NOT implement this %6q !!!
E           x='a b'
E           printf '[%6q]\n' "$x"
E           printf '[%1q]\n' "$x"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf negative numbers[L826]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cfa70>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf negative numbers', script="printf '[%d] ' -42\necho status=$?\nprintf '[%i] ' -42\necho status=$...0\n[0] status=1\n[0] status=1\n[0] status=1\n[0] status=1', shells=['ash'], variant='OK')], line_number=826, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf negative numbers (line 826)
E           
E           stdout mismatch:
E             expected: '[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=1\n[-42] status=1\n[-42] status=1\n[-42] status=1'
E             actual:   '[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=0\n[0] status=0\n[0] status=0'
E           
E           Expected stdout: '[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=1\n[-42] status=1\n[-42] status=1\n[-42] status=1'
E           Actual stdout:   '[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=0\n[-42] status=0\n[0] status=0\n[0] status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '[%d] ' -42
E           echo status=$?
E           printf '[%i] ' -42
E           echo status=$?
E           
E           # extra LEADING space too
E           printf '[%d] ' ' -42'
E           echo status=$?
E           printf '[%i] ' ' -42'
E           echo status=$?
E           
E           # extra TRAILING space too
E           printf '[%d] ' ' -42 '
E           echo status=$?
E           printf '[%i] ' ' -42 '
E           echo status=$?
E           
E           # extra TRAILING chars
E           printf '[%d] ' ' -42z'
E           echo status=$?
E           printf '[%i] ' ' -42z'
E           echo status=$?
E           
E           exit 0  # ok
E           
E           # zsh is LESS STRICT
E           
E           # osh is like zsh but has a hard failure (TODO: could be an option?)
E           
E           # ash is MORE STRICT
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf # flag[L914]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cfbf0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf # flag', script="# I didn't know these existed -- I only knew about - and 0 !\n# Note: '#' flag ...['osh'], variant='N-I'), Assertion(type='status', value=2, shells=['osh'], variant='N-I')], line_number=914, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf # flag (line 914)
E           
E           stdout mismatch:
E             expected: '[0][052]\n[0][0x2a]\n[0][0X2A]\n---\n[3][3.]\n[3][3.00000]'
E             actual:   '[0o0][0o52]\n[0x0][0x2a]\n[0X0][0X2A]\n---\n[3][3.]\n[3][3.00000]'
E           
E           Expected stdout: '[0][052]\n[0][0x2a]\n[0][0X2A]\n---\n[3][3.]\n[3][3.00000]'
E           Actual stdout:   '[0o0][0o52]\n[0x0][0x2a]\n[0X0][0X2A]\n---\n[3][3.]\n[3][3.00000]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # I didn't know these existed -- I only knew about - and 0 !
E           # Note: '#' flag for integers outputs a prefix ONLY WHEN the value is non-zero
E           printf '[%#o][%#o]\n' 0 42
E           printf '[%#x][%#x]\n' 0 42
E           printf '[%#X][%#X]\n' 0 42
E           echo ---
E           # Note: '#' flag for %f, %g always outputs the decimal point.
E           printf '[%.0f][%#.0f]\n' 3 3
E           # Note: In addition, '#' flag for %g does not omit zeroes in fraction
E           printf '[%g][%#g]\n' 3 3
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::Runtime error for invalid integer[L938]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cfcb0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='Runtime error for invalid integer', script="x=3abc\nprintf '%d\\n' $x\necho status=$?\nprintf '%d\\n' x...BUG'), Assertion(type='stdout', value='status=1\nstatus=1', shells=['osh'], variant='OK')], line_number=938, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Runtime error for invalid integer (line 938)
E           
E           stdout mismatch:
E             expected: '3\nstatus=1\n0\nstatus=1'
E             actual:   '0\nstatus=0\n0\nstatus=0'
E           
E           Expected stdout: '3\nstatus=1\n0\nstatus=1'
E           Actual stdout:   '0\nstatus=0\n0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=3abc
E           printf '%d\n' $x
E           echo status=$?
E           printf '%d\n' xyz
E           echo status=$?
E           # zsh should exit 1 in both cases
E           # fails but also prints 0 instead of 3abc
E           # osh doesn't print anything invalid
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::%(strftime format)T[L970]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cfd70>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='%(strftime format)T', script="# The result depends on timezone\nexport TZ=Asia/Tokyo\nprintf '%(%Y-%m-%...ariant='N-I'), Assertion(type='stdout', value='status=2', shells=['dash'], variant='N-I')], line_number=970, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: %(strftime format)T (line 970)
E           
E           stdout mismatch:
E             expected: '2019-05-16\n2019-05-15\nstatus=0'
E             actual:   '%(%Y-%m-1557978599)T\n%(%Y-%m-1557978599)T\nstatus=0'
E           
E           Expected stdout: '2019-05-16\n2019-05-15\nstatus=0'
E           Actual stdout:   '%(%Y-%m-1557978599)T\n%(%Y-%m-1557978599)T\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # The result depends on timezone
E           export TZ=Asia/Tokyo
E           printf '%(%Y-%m-%d)T\n' 1557978599
E           export TZ=US/Eastern
E           printf '%(%Y-%m-%d)T\n' 1557978599
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::%(strftime format)T doesn't respect TZ if not exported[L989]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cfe30>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name="%(strftime format)T doesn't respect TZ if not exported", script='# note: this test leaks!  It assumes t...rtion(type='stdout-json', value='', shells=['mksh', 'zsh', 'ash', 'dash'], variant='N-I')], line_number=989, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: %(strftime format)T doesn't respect TZ if not exported (line 989)
E           
E           stdout mismatch:
E             expected: 'not equal'
E             actual:   ''
E           
E           Expected stdout: 'not equal'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # note: this test leaks!  It assumes that /etc/localtime is NOT Portugal.
E           
E           TZ=Portugal  # NOT exported
E           localtime=$(printf '%(%Y-%m-%d %H:%M:%S)T\n' 1557978599)
E           
E           # TZ is respected
E           export TZ=Portugal
E           tz=$(printf '%(%Y-%m-%d %H:%M:%S)T\n' 1557978599)
E           
E           #echo $localtime
E           #echo $tz
E           
E           if ! test "$localtime" = "$tz"; then
E             echo 'not equal'
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::%(strftime format)T TZ in environ but not in shell's memory[L1012]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cfef0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name="%(strftime format)T TZ in environ but not in shell's memory", script='# note: this test leaks!  It assu...tion(type='stdout-json', value='', shells=['mksh', 'zsh', 'ash', 'dash'], variant='N-I')], line_number=1012, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: %(strftime format)T TZ in environ but not in shell's memory (line 1012)
E           
E           stdout mismatch:
E             expected: 'not equal'
E             actual:   ''
E           
E           Expected stdout: 'not equal'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # note: this test leaks!  It assumes that /etc/localtime is NOT Portugal.
E           
E           # TZ is respected
E           export TZ=Portugal
E           tz=$(printf '%(%Y-%m-%d %H:%M:%S)T\n' 1557978599)
E           
E           unset TZ  # unset in the shell, but still in the environment
E           
E           localtime=$(printf '%(%Y-%m-%d %H:%M:%S)T\n' 1557978599)
E           
E           if ! test "$localtime" = "$tz"; then
E             echo 'not equal'
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::%10.5(strftime format)T[L1033]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091cffb0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='%10.5(strftime format)T', script="# The result depends on timezone\nexport TZ=Asia/Tokyo\nprintf '[%10....ant='N-I'), Assertion(type='stdout', value='[[status=2', shells=['dash'], variant='N-I')], line_number=1033, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: %10.5(strftime format)T (line 1033)
E           
E           stdout mismatch:
E             expected: '[     2019-]\n[     2019-]\nstatus=0'
E             actual:   '[%10.5(%Y-%m-1557978599)T]\n[%10.5(%Y-%m-1557978599)T]\nstatus=0'
E           
E           Expected stdout: '[     2019-]\n[     2019-]\nstatus=0'
E           Actual stdout:   '[%10.5(%Y-%m-1557978599)T]\n[%10.5(%Y-%m-1557978599)T]\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # The result depends on timezone
E           export TZ=Asia/Tokyo
E           printf '[%10.5(%Y-%m-%d)T]\n' 1557978599
E           export TZ=US/Eastern
E           printf '[%10.5(%Y-%m-%d)T]\n' 1557978599
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::bash truncates long strftime string at 128[L1059]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f4170>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='bash truncates long strftime string at 128', script="case $SH in ash|dash|mksh|zsh) exit ;; esac\n\nstr...Assertion(type='stdout', value='', shells=['ash', 'dash', 'mksh', 'zsh'], variant='N-I')], line_number=1059, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bash truncates long strftime string at 128 (line 1059)
E           
E           stdout mismatch:
E             expected: '4\n40\n120\n124\n0'
E             actual:   '6\n24\n64\n66\n68'
E           
E           Expected stdout: '4\n40\n120\n124\n0'
E           Actual stdout:   '6\n24\n64\n66\n68\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in ash|dash|mksh|zsh) exit ;; esac
E           
E           strftime-format() {
E             local n=$1
E           
E             # Prints increasingly long format strings:
E             # %(%Y)T %(%Y)T %(%Y%Y)T ...
E           
E             echo -n '%('
E             for i in $(seq $n); do
E               echo -n '%Y'
E             done
E             echo -n ')T'
E           }
E           
E           printf $(strftime-format 1) | wc --bytes
E           printf $(strftime-format 10) | wc --bytes
E           printf $(strftime-format 30) | wc --bytes
E           printf $(strftime-format 31) | wc --bytes
E           printf $(strftime-format 32) | wc --bytes
E           
E           case $SH in
E             (*/_bin/cxx-dbg/*)    
E               # Ensure that oils-for-unix detects the truncation of a fixed buffer.
E               # bash has a buffer of 128.
E           
E               set +o errexit
E               (
E                 printf $(strftime-format 1000)
E               )
E               status=$?
E               if test $status -ne 1; then
E                 echo FAIL
E               fi
E               ;;
E           esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf positive integer overflow[L1117]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f4230>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf positive integer overflow', script='# %i seems like a synonym for %d\n\nfor fmt in \'%u\\n\' \'%...407370955161\nstatus=0\n1844674407370955161\nstatus=0\n', shells=['zsh'], variant='BUG')], line_number=1117, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf positive integer overflow (line 1117)
E           
E           stdout mismatch:
E             expected: '18446744073709551615\nstatus=0\n18446744073709551615\nstatus=0\n\n9223372036854775807\nstatus=0\n9223372036854775807\nstatus=0'
E             actual:   '18446744073709551615\nstatus=0\n18446744073709551616\nstatus=0\n\n18446744073709551615\nstatus=0\n18446744073709551616\nstatus=0'
E           
E           Expected stdout: '18446744073709551615\nstatus=0\n18446744073709551615\nstatus=0\n\n9223372036854775807\nstatus=0\n9223372036854775807\nstatus=0\n'
E           Actual stdout:   '18446744073709551615\nstatus=0\n18446744073709551616\nstatus=0\n\n18446744073709551615\nstatus=0\n18446744073709551616\nstatus=0\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # %i seems like a synonym for %d
E           
E           for fmt in '%u\n' '%d\n'; do
E             # bash considers this in range for %u
E             # same with mksh
E             # zsh cuts everything off after 19 digits
E             # ash truncates everything
E             printf "$fmt" '18446744073709551615'
E             echo status=$?
E             printf "$fmt" '18446744073709551616'
E             echo status=$?
E             echo
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf negative integer overflow[L1199]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f42f0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf negative integer overflow', script='# %i seems like a synonym for %d\n\nfor fmt in \'%u\\n\' \'%...'0\nstatus=1\n0\nstatus=1\n\n0\nstatus=1\n0\nstatus=1\n', shells=['ash'], variant='BUG')], line_number=1199, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf negative integer overflow (line 1199)
E           
E           stdout mismatch:
E             expected: '1\nstatus=0\n18446744073709551615\nstatus=0\n\n-9223372036854775808\nstatus=0\n-9223372036854775808\nstatus=0'
E             actual:   '-18446744073709551615\nstatus=0\n-18446744073709551616\nstatus=0\n\n-18446744073709551615\nstatus=0\n-18446744073709551616\nstatus=0'
E           
E           Expected stdout: '1\nstatus=0\n18446744073709551615\nstatus=0\n\n-9223372036854775808\nstatus=0\n-9223372036854775808\nstatus=0\n'
E           Actual stdout:   '-18446744073709551615\nstatus=0\n-18446744073709551616\nstatus=0\n\n-18446744073709551615\nstatus=0\n-18446744073709551616\nstatus=0\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # %i seems like a synonym for %d
E           
E           for fmt in '%u\n' '%d\n'; do
E           
E             printf "$fmt" '-18446744073709551615'
E             echo status=$?
E             printf "$fmt" '-18446744073709551616'
E             echo status=$?
E             echo
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %b does backslash escaping[L1278]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f43b0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %b does backslash escaping', script="printf '[%s]\\n' '\\044'  # escapes not evaluated\nprintf '...alue='[\\044]\n[$]\n\n[\\x7e]\n[\\x7e]\n\n[\\A]\n[\\A]', shells=['dash'], variant='N-I')], line_number=1278, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %b does backslash escaping (line 1278)
E           
E           stdout mismatch:
E             expected: '[\\044]\n[$]\n\n[\\x7e]\n[~]\n\n[\\A]\n[\\A]'
E             actual:   '[\\044]\n[$]\n\n[\\x7e]\n[~]\n\n[\\A]\n[A]'
E           
E           Expected stdout: '[\\044]\n[$]\n\n[\\x7e]\n[~]\n\n[\\A]\n[\\A]'
E           Actual stdout:   '[\\044]\n[$]\n\n[\\x7e]\n[~]\n\n[\\A]\n[A]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '[%s]\n' '\044'  # escapes not evaluated
E           printf '[%b]\n' '\044'  # YES, escapes evaluated
E           echo
E           
E           printf '[%s]\n' '\x7e'  # escapes not evaluated
E           printf '[%b]\n' '\x7e'  # YES, escapes evaluated
E           echo
E           
E           # not a valid escape
E           printf '[%s]\n' '\A'
E           printf '[%b]\n' '\A'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %b respects \\c early return[L1329]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f4530>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %b respects \\c early return', script="printf '[%b]\\n' 'ab\\ncd\\cxy'\necho $?", assertions=[Assertion(type='stdout', value='[ab\ncd0', shells=None, variant=None)], line_number=1329, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %b respects \c early return (line 1329)
E           
E           stdout mismatch:
E             expected: '[ab\ncd0'
E             actual:   '[ab\ncdcxy]\n0'
E           
E           Expected stdout: '[ab\ncd0'
E           Actual stdout:   '[ab\ncdcxy]\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '[%b]\n' 'ab\ncd\cxy'
E           echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::printf %b with truncated octal escapes[L1379]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f46b0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='printf %b with truncated octal escapes', script="# 8 is not a valid octal digit\n\nprintf '%b\\n' '\\55...ype='stdout', value='\\558\n-8\n\n 5c 37\n 07\n 07\n 07', shells=['zsh'], variant='N-I')], line_number=1379, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %b with truncated octal escapes (line 1379)
E           
E           stdout mismatch:
E             expected: '-8\n-8\n\n 07\n 07\n 07\n 07'
E             actual:   '-8\n-8\n\n  07\n  07\n  07\n  07'
E           
E           Expected stdout: '-8\n-8\n\n 07\n 07\n 07\n 07'
E           Actual stdout:   '-8\n-8\n\n  07\n  07\n  07\n  07\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # 8 is not a valid octal digit
E           
E           printf '%b\n' '\558'
E           printf '%b\n' '\0558'
E           echo
E           
E           show_bytes() {
E             od -A n -t x1
E           }
E           printf '%b' '\7' | show_bytes
E           printf '%b' '\07' | show_bytes
E           printf '%b' '\007' | show_bytes
E           printf '%b' '\0007' | show_bytes
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::leading spaces are accepted in value given to %d %X, but not trailing spaces[L1509]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f48f0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='leading spaces are accepted in value given to %d %X, but not trailing spaces', script="case $SH in zsh)...ash'], variant='BUG'), Assertion(type='stdout', value='', shells=['zsh'], variant='BUG')], line_number=1509, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: leading spaces are accepted in value given to %d %X, but not trailing spaces (line 1509)
E           
E           stdout mismatch:
E             expected: '-123\nstatus=0\n-123\nstatus=1\n---\n63\nstatus=0\n255\nstatus=0\nFF\nstatus=0\nff\nstatus=0'
E             actual:   '-123\nstatus=0\n-123\nstatus=0\n---\n63\nstatus=0\n255\nstatus=0\nFF\nstatus=0\nff\nstatus=0'
E           
E           Expected stdout: '-123\nstatus=0\n-123\nstatus=1\n---\n63\nstatus=0\n255\nstatus=0\nFF\nstatus=0\nff\nstatus=0'
E           Actual stdout:   '-123\nstatus=0\n-123\nstatus=0\n---\n63\nstatus=0\n255\nstatus=0\nFF\nstatus=0\nff\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) exit ;; esac
E           
E           # leading space is allowed
E           printf '%d\n' ' -123'
E           echo status=$?
E           printf '%d\n' ' -123 '
E           echo status=$?
E           
E           echo ---
E           
E           printf '%d\n' ' +077'
E           echo status=$?
E           
E           printf '%d\n' ' +0xff'
E           echo status=$?
E           
E           printf '%X\n' ' +0xff'
E           echo status=$?
E           
E           printf '%x\n' ' +0xff'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-printf.test.sh::Arbitrary base 64#a is rejected (unlike in shell arithmetic)[L1584]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f49b0>
test_file = 'builtin-printf.test.sh'
test_case = TestCase(name='Arbitrary base 64#a is rejected (unlike in shell arithmetic)', script="printf '%d\\n' '64#a'\necho stat...OK'), Assertion(type='stdout', value='0\nstatus=1', shells=['zsh', 'ash'], variant='OK')], line_number=1584, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Arbitrary base 64#a is rejected (unlike in shell arithmetic) (line 1584)
E           
E           stdout mismatch:
E             expected: '64\nstatus=1'
E             actual:   '0\nstatus=0'
E           
E           Expected stdout: '64\nstatus=1'
E           Actual stdout:   '0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf '%d\n' '64#a'
E           echo status=$?
E           
E           # bash, dash, and mksh print 64 and return status 1
E           # zsh and ash print 0 and return status 1
E           # OSH rejects it completely (prints nothing) and returns status 1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read builtin with no newline returns status 1[L52]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f4d70>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read builtin with no newline returns status 1', script="# This is odd because the variable is populated..., shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=52, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read builtin with no newline returns status 1 (line 52)
E           
E           stdout mismatch:
E             expected: 'status=1\nZZZ'
E             actual:   'status=0\nZZZ'
E           
E           Expected stdout: 'status=1\nZZZ'
E           Actual stdout:   'status=0\nZZZ\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is odd because the variable is populated successfully.  OSH/YSH might
E           # need a separate put reading feature that doesn't use IFS.
E           
E           echo -n ZZZ | { read x; echo status=$?; echo $x; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -n doesn't strip whitespace (bug fix)[L100]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f5130>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="read -n doesn't strip whitespace (bug fix)", script='case $SH in dash|zsh) exit ;; esac\n\necho \'  a b...n[a b]\n\nthree vars\n[a] [] []\n[a] [b] []\n[a] [b] []', shells=['mksh'], variant='BUG')], line_number=100, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -n doesn't strip whitespace (bug fix) (line 100)
E           
E           stdout mismatch:
E             expected: '[  a ]\n[  a b]\n[  a b ]\n\none var strips whitespace\n[a]\n[a b]\n[a b]\n\nthree vars\n[a] [] []\n[a] [b] []\n[a] [b] []'
E             actual:   '[]\n[]\n[]\n\none var strips whitespace\n[]\n[]\n[]\n\nthree vars\n[] [] []\n[] [] []\n[] [] []'
E           
E           Expected stdout: '[  a ]\n[  a b]\n[  a b ]\n\none var strips whitespace\n[a]\n[a b]\n[a b]\n\nthree vars\n[a] [] []\n[a] [b] []\n[a] [b] []'
E           Actual stdout:   '[]\n[]\n[]\n\none var strips whitespace\n[]\n[]\n[]\n\nthree vars\n[] [] []\n[] [] []\n[] [] []\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh) exit ;; esac
E           
E           echo '  a b  ' | (read -n 4; echo "[$REPLY]")
E           echo '  a b  ' | (read -n 5; echo "[$REPLY]")
E           echo '  a b  ' | (read -n 6; echo "[$REPLY]")
E           echo
E           
E           echo 'one var strips whitespace'
E           echo '  a b  ' | (read -n 4 myvar; echo "[$myvar]")
E           echo '  a b  ' | (read -n 5 myvar; echo "[$myvar]")
E           echo '  a b  ' | (read -n 6 myvar; echo "[$myvar]")
E           echo
E           
E           echo 'three vars'
E           echo '  a b  ' | (read -n 4 x y z; echo "[$x] [$y] [$z]")
E           echo '  a b  ' | (read -n 5 x y z; echo "[$x] [$y] [$z]")
E           echo '  a b  ' | (read -n 6 x y z; echo "[$x] [$y] [$z]")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -d -n - respects delimiter and splits[L154]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f51f0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -d -n - respects delimiter and splits', script='case $SH in dash|zsh|ash) exit ;; esac\n\necho \'d...\n[a b]\n\nthree vars\n[a] [] []\n[a] [] []\n[a] [b] []', shells=['mksh'], variant='BUG')], line_number=154, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -d -n - respects delimiter and splits (line 154)
E           
E           stdout mismatch:
E             expected: 'delim c\n[  a]\n[  a ]\n[  a b]\n\none var\n[a]\n[a]\n[a b]\n\nthree vars\n[a] [] []\n[a] [] []\n[a] [b] []'
E             actual:   'delim c\n[]\n[]\n[]\n\none var\n[]\n[]\n[]\n\nthree vars\n[] [] []\n[] [] []\n[] [] []'
E           
E           Expected stdout: 'delim c\n[  a]\n[  a ]\n[  a b]\n\none var\n[a]\n[a]\n[a b]\n\nthree vars\n[a] [] []\n[a] [] []\n[a] [b] []'
E           Actual stdout:   'delim c\n[]\n[]\n[]\n\none var\n[]\n[]\n[]\n\nthree vars\n[] [] []\n[] [] []\n[] [] []\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh|ash) exit ;; esac
E           
E           echo 'delim c'
E           echo '  a b c ' | (read -d 'c' -n 3; echo "[$REPLY]")
E           echo '  a b c ' | (read -d 'c' -n 4; echo "[$REPLY]")
E           echo '  a b c ' | (read -d 'c' -n 5; echo "[$REPLY]")
E           echo
E           
E           echo 'one var'
E           echo '  a b c ' | (read -d 'c' -n 3 myvar; echo "[$myvar]")
E           echo '  a b c ' | (read -d 'c' -n 4 myvar; echo "[$myvar]")
E           echo '  a b c ' | (read -d 'c' -n 5 myvar; echo "[$myvar]")
E           echo
E           
E           echo 'three vars'
E           echo '  a b c ' | (read -d 'c' -n 3 x y z; echo "[$x] [$y] [$z]")
E           echo '  a b c ' | (read -d 'c' -n 4 x y z; echo "[$x] [$y] [$z]")
E           echo '  a b c ' | (read -d 'c' -n 5 x y z; echo "[$x] [$y] [$z]")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read without args uses $REPLY, no splitting occurs (without -n)[L231]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f5430>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read without args uses $REPLY, no splitting occurs (without -n)', script='# mksh and zsh implement spli..., value='[]\n[a b  ]\n[]\n[a b    line2]\n[]\n[a b  \\]', shells=['dash'], variant='BUG')], line_number=231, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read without args uses $REPLY, no splitting occurs (without -n) (line 231)
E           
E           stdout mismatch:
E             expected: '[  a b  ]\n[a b]\n[  a b    line2]\n[a b    line2]\n[  a b  \\]\n[a b  \\]'
E             actual:   '[]\n[]\n[]\n[]\n[]\n[]'
E           
E           Expected stdout: '[  a b  ]\n[a b]\n[  a b    line2]\n[a b    line2]\n[  a b  \\]\n[a b  \\]'
E           Actual stdout:   '[]\n[]\n[]\n[]\n[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh and zsh implement splitting with $REPLY, bash/ash don't
E           
E           echo '  a b  ' | (read; echo "[$REPLY]")
E           echo '  a b  ' | (read myvar; echo "[$myvar]")
E           
E           echo '  a b  \
E             line2' | (read; echo "[$REPLY]")
E           echo '  a b  \
E             line2' | (read myvar; echo "[$myvar]")
E           
E           # Now test with -r
E           echo '  a b  \
E             line2' | (read -r; echo "[$REPLY]")
E           echo '  a b  \
E             line2' | (read -r myvar; echo "[$myvar]")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read with line continuation reads multiple physical lines[L365]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f58b0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read with line continuation reads multiple physical lines', script='# NOTE: osh failing because of file...rtion(type='stdout', value="['-e onetwo', '-e one\\\\']", shells=['dash'], variant='N-I')], line_number=365, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read with line continuation reads multiple physical lines (line 365)
E           
E           stdout mismatch:
E             expected: "['onetwo', 'one\\\\']"
E             actual:   "['one\\\\', 'one\\\\']"
E           
E           Expected stdout: "['onetwo', 'one\\\\']"
E           Actual stdout:   "['one\\\\', 'one\\\\']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: osh failing because of file descriptor issue.  stdin has to be closed!
E           tmp=$TMP/$(basename $SH)-readr.txt
E           echo -e 'one\\\ntwo\n' > $tmp
E           read escaped < $tmp
E           read -r raw < $tmp
E           argv.py "$escaped" "$raw"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read multiple vars spanning many lines[L375]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f5970>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read multiple vars spanning many lines', script='read x y << \'EOF\'\none-\\\ntwo three-\\\nfour five-\...type='stdout', value="['one-two', 'three-four five-six', '']", shells=None, variant=None)], line_number=375, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read multiple vars spanning many lines (line 375)
E           
E           stdout mismatch:
E             expected: "['one-two', 'three-four five-six', '']"
E             actual:   "['one-\\\\', '', '']"
E           
E           Expected stdout: "['one-two', 'three-four five-six', '']"
E           Actual stdout:   "['one-\\\\', '', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           read x y << 'EOF'
E           one-\
E           two three-\
E           four five-\
E           six
E           EOF
E           argv.py "$x" "$y" "$z"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read with IFS=$'\\n'[L415]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f5bb0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="read with IFS=$'\\n'", script='# The leading spaces are stripped if they appear in IFS.\nIFS=$(echo -e ... variant=None), Assertion(type='stdout', value='[a b c]', shells=['dash'], variant='N-I')], line_number=415, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read with IFS=$'\n' (line 415)
E           
E           stdout mismatch:
E             expected: '[  a b c]'
E             actual:   '[a b c]'
E           
E           Expected stdout: '[  a b c]'
E           Actual stdout:   '[a b c]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # The leading spaces are stripped if they appear in IFS.
E           IFS=$(echo -e '\n')
E           read var <<EOF
E             a b c
E             d e f
E           EOF
E           echo "[$var]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read multiple lines with IFS=:[L426]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f5c70>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read multiple lines with IFS=:', script='# The leading spaces are stripped if they appear in IFS.\n# IF...ertions=[Assertion(type='stdout', value='[  \\a |b: c|d  e|]', shells=None, variant=None)], line_number=426, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read multiple lines with IFS=: (line 426)
E           
E           stdout mismatch:
E             expected: '[  \\a |b: c|d  e|]'
E             actual:   '[\\a |b| c|d\\]'
E           
E           Expected stdout: '[  \\a |b: c|d  e|]'
E           Actual stdout:   '[\\a |b| c|d\\]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # The leading spaces are stripped if they appear in IFS.
E           # IFS chars are escaped with :.
E           tmp=$TMP/$(basename $SH)-read-ifs.txt
E           IFS=:
E           cat >$tmp <<'EOF'
E             \\a :b\: c:d\
E             e
E           EOF
E           read a b c d < $tmp
E           # Use printf because echo in dash/mksh interprets escapes, while it doesn't in
E           # bash.
E           printf "%s\n" "[$a|$b|$c|$d]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read with IFS=''[L441]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f5d30>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="read with IFS=''", script='IFS=\'\'\nread x y <<EOF\n  a b c d\nEOF\necho "[$x|$y]"', assertions=[Assertion(type='stdout', value='[  a b c d|]', shells=None, variant=None)], line_number=441, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read with IFS='' (line 441)
E           
E           stdout mismatch:
E             expected: '[  a b c d|]'
E             actual:   '[a b c d|]'
E           
E           Expected stdout: '[  a b c d|]'
E           Actual stdout:   '[a b c d|]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           IFS=''
E           read x y <<EOF
E             a b c d
E           EOF
E           echo "[$x|$y]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -a reads into array[L477]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f5f70>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -a reads into array', script='# read -a is used in bash-completion\n# none of these shells impleme...rtion(type='stdout-json', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=477, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -a reads into array (line 477)
E           
E           stdout mismatch:
E             expected: "['a', 'b', 'c d']\n['a', 'b', 'c\\\\', 'd']\n[]\n[]"
E             actual:   "['a', 'b', 'c', 'd']\n['a', 'b', 'c\\\\', 'd']\n[]\n[]"
E           
E           Expected stdout: "['a', 'b', 'c d']\n['a', 'b', 'c\\\\', 'd']\n[]\n[]"
E           Actual stdout:   "['a', 'b', 'c', 'd']\n['a', 'b', 'c\\\\', 'd']\n[]\n[]\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # read -a is used in bash-completion
E           # none of these shells implement it
E           case $SH in
E             *mksh|*dash|*zsh|*/ash)
E               exit 2;
E               ;;
E           esac
E           
E           read -a myarray <<'EOF'
E           a b c\ d
E           EOF
E           argv.py "${myarray[@]}"
E           
E           # arguments are ignored here
E           read -r -a array2 extra arguments <<'EOF'
E           a b c\ d
E           EOF
E           argv.py "${array2[@]}"
E           argv.py "${extra[@]}"
E           argv.py "${arguments[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -d : (colon-separated records)[L509]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6030>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -d : (colon-separated records)', script='printf a,b,c:d,e,f:g,h,i | {\n  IFS=,\n  read -d : v1\n  ...sertion(type='stdout', value='v1=\nv1= v2=\nv1= v2= v3=', shells=['dash'], variant='N-I')], line_number=509, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -d : (colon-separated records) (line 509)
E           
E           stdout mismatch:
E             expected: 'v1=a,b,c\nv1=d v2=e,f\nv1=g v2=h v3=i'
E             actual:   'v1=\nv1= v2=\nv1= v2= v3='
E           
E           Expected stdout: 'v1=a,b,c\nv1=d v2=e,f\nv1=g v2=h v3=i'
E           Actual stdout:   'v1=\nv1= v2=\nv1= v2= v3=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf a,b,c:d,e,f:g,h,i | {
E             IFS=,
E             read -d : v1
E             echo "v1=$v1"
E             read -d : v1 v2
E             echo "v1=$v1 v2=$v2"
E             read -d : v1 v2 v3
E             echo "v1=$v1 v2=$v2 v3=$v3"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -d '' (null-separated records)[L530]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f60f0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="read -d '' (null-separated records)", script='printf \'a,b,c\\0d,e,f\\0g,h,i\' | {\n  IFS=,\n  read -d ...sertion(type='stdout', value='v1=\nv1= v2=\nv1= v2= v3=', shells=['dash'], variant='N-I')], line_number=530, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -d '' (null-separated records) (line 530)
E           
E           stdout mismatch:
E             expected: 'v1=a,b,c\nv1=d v2=e,f\nv1=g v2=h v3=i'
E             actual:   'v1=\nv1= v2=\nv1= v2= v3='
E           
E           Expected stdout: 'v1=a,b,c\nv1=d v2=e,f\nv1=g v2=h v3=i'
E           Actual stdout:   'v1=\nv1= v2=\nv1= v2= v3=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf 'a,b,c\0d,e,f\0g,h,i' | {
E             IFS=,
E             read -d '' v1
E             echo "v1=$v1"
E             read -d '' v1 v2
E             echo "v1=$v1 v2=$v2"
E             read -d '' v1 v2 v3
E             echo "v1=$v1 v2=$v2 v3=$v3"
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -rd[L551]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f61b0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -rd', script='read -rd \'\' var <<EOF\nfoo\nbar\nEOF\necho "$var"', assertions=[Assertion(type='st...s=None, variant=None), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=551, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -rd (line 551)
E           
E           stdout mismatch:
E             expected: 'foo\nbar'
E             actual:   ''
E           
E           Expected stdout: 'foo\nbar'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           read -rd '' var <<EOF
E           foo
E           bar
E           EOF
E           echo "$var"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -d when there's no delimiter[L565]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6270>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="read -d when there's no delimiter", script='{ read -d : part\n  echo $part $?\n  read -d : part\n  echo...ne, variant=None), Assertion(type='stdout', value='2\n2', shells=['dash'], variant='N-I')], line_number=565, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -d when there's no delimiter (line 565)
E           
E           stdout mismatch:
E             expected: 'foo 0\nbar 1'
E             actual:   '1\n1'
E           
E           Expected stdout: 'foo 0\nbar 1'
E           Actual stdout:   '1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           { read -d : part
E             echo $part $?
E             read -d : part
E             echo $part $?
E           } <<EOF
E           foo:bar
E           EOF
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -t 0 tests if input is available[L582]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6330>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -t 0 tests if input is available', script='case $SH in dash|zsh|mksh) exit ;; esac\n\n# is there i...), Assertion(type='stdout-json', value='', shells=['dash', 'zsh', 'mksh'], variant='N-I')], line_number=582, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -t 0 tests if input is available (line 582)
E           
E           stdout mismatch:
E             expected: '0\n0\nreply=\n0'
E             actual:   '1\n1\nreply=foo\n0'
E           
E           Expected stdout: '0\n0\nreply=\n0'
E           Actual stdout:   '1\n1\nreply=foo\n0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: /dev/null: No such file or directory\nbash: /dev/null: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh|mksh) exit ;; esac
E           
E           # is there input available?
E           read -t 0 < /dev/null
E           echo $?
E           
E           # floating point
E           read -t 0.0 < /dev/null
E           echo $?
E           
E           # floating point
E           echo foo | { read -t 0; echo reply=$REPLY; }
E           echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -u[L634]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6570>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -u', script='case $SH in dash|mksh) exit ;; esac\n\n# file descriptor\nread -u 3 3<<EOF\nhi\nEOF\n...nt=None), Assertion(type='stdout-json', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=634, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -u (line 634)
E           
E           stdout mismatch:
E             expected: 'reply=hi'
E             actual:   'reply='
E           
E           Expected stdout: 'reply=hi'
E           Actual stdout:   'reply=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           # file descriptor
E           read -u 3 3<<EOF
E           hi
E           EOF
E           echo reply=$REPLY
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -u -s[L657]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f66f0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -u -s', script='case $SH in dash|mksh) exit ;; esac\n\n# file descriptor\nread -s -u 3 3<<EOF\nhi\...nt=None), Assertion(type='stdout-json', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=657, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -u -s (line 657)
E           
E           stdout mismatch:
E             expected: 'reply=hi'
E             actual:   'reply='
E           
E           Expected stdout: 'reply=hi'
E           Actual stdout:   'reply=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           # file descriptor
E           read -s -u 3 3<<EOF
E           hi
E           EOF
E           echo reply=$REPLY
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -u 3 -d 5[L670]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f67b0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -u 3 -d 5', script='case $SH in dash|mksh) exit ;; esac\n\n# file descriptor\nread -u 3 -d 5 3<<EO...nt=None), Assertion(type='stdout-json', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=670, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -u 3 -d 5 (line 670)
E           
E           stdout mismatch:
E             expected: 'reply=1234'
E             actual:   'reply='
E           
E           Expected stdout: 'reply=1234'
E           Actual stdout:   'reply=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           # file descriptor
E           read -u 3 -d 5 3<<EOF
E           123456789
E           EOF
E           echo reply=$REPLY
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -u 3 -d b -N 6[L683]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6870>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -u 3 -d b -N 6', script='case $SH in ash|zsh) exit ;; esac\n\n# file descriptor\nread -u 3 -d b -N...mksh'], variant='BUG'), Assertion(type='status', value=2, shells=['mksh'], variant='BUG')], line_number=683, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -u 3 -d b -N 6 (line 683)
E           
E           stdout mismatch:
E             expected: 'reply=abab\nreply=ab'
E             actual:   'reply=\nreply='
E           
E           Expected stdout: 'reply=abab\nreply=ab'
E           Actual stdout:   'reply=\nreply=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in ash|zsh) exit ;; esac
E           
E           # file descriptor
E           read -u 3 -d b -N 4 3<<EOF
E           ababababa
E           EOF
E           echo reply=$REPLY
E           # test end on EOF
E           read -u 3 -d b -N 6 3<<EOF
E           ab
E           EOF
E           echo reply=$REPLY
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -N doesn't respect delimiter, while read -n does[L705]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6930>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="read -N doesn't respect delimiter, while read -n does", script='case $SH in dash|zsh|ash) exit ;; esac\...'), Assertion(type='stdout-json', value='', shells=['dash', 'zsh', 'ash'], variant='N-I')], line_number=705, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -N doesn't respect delimiter, while read -n does (line 705)
E           
E           stdout mismatch:
E             expected: 'foo\nfooba'
E             actual:   'foo\nfoo'
E           
E           Expected stdout: 'foo\nfooba'
E           Actual stdout:   'foo\nfoo\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh|ash) exit ;; esac
E           
E           echo foobar | { read -n 5 -d b; echo $REPLY; }
E           echo foobar | { read -N 5 -d b; echo $REPLY; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read with smooshed args[L748]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6b70>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read with smooshed args', script='echo hi | { read -rn1 var; echo var=$var; }', assertions=[Assertion(t...iant=None), Assertion(type='stdout', value='var=', shells=['dash', 'zsh'], variant='N-I')], line_number=748, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read with smooshed args (line 748)
E           
E           stdout mismatch:
E             expected: 'var=h'
E             actual:   'var=hi'
E           
E           Expected stdout: 'var=h'
E           Actual stdout:   'var=hi\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hi | { read -rn1 var; echo var=$var; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -r -d '' for NUL strings, e.g. find -print0[L757]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6c30>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="read -r -d '' for NUL strings, e.g. find -print0", script='case $SH in dash|zsh|mksh) exit ;; esac  # N...=None), Assertion(type='stdout', value='', shells=['dash', 'zsh', 'mksh'], variant='N-I')], line_number=757, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -r -d '' for NUL strings, e.g. find -print0 (line 757)
E           
E           stdout mismatch:
E             expected: '[./a\\b\\c\\d]'
E             actual:   '[./abcd]'
E           
E           Expected stdout: '[./a\\b\\c\\d]'
E           Actual stdout:   '[./abcd]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh|mksh) exit ;; esac  # NOT IMPLEMENTED
E           
E           mkdir -p read0
E           cd read0
E           rm -f *
E           
E           touch a\\b\\c\\d  # -r is necessary!
E           
E           find . -type f -a -print0 | { read -r -d ''; echo "[$REPLY]"; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::mapfile from directory (bash doesn't handle errors)[L808]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6e70>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="mapfile from directory (bash doesn't handle errors)", script='case $SH in dash|ash|mksh|zsh) return ;; ...rtion(type='stdout-json', value='', shells=['dash', 'ash', 'mksh', 'zsh'], variant='N-I')], line_number=808, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mapfile from directory (bash doesn't handle errors) (line 808)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: /tmp/dir: Is a directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|ash|mksh|zsh) return ;; esac  # not implemented
E           
E           mkdir -p dir
E           mapfile $x < ./dir
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -n and backslash escape[L839]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f6ff0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -n and backslash escape', script='case $SH in zsh) exit 99;; esac  # read -n not implemented\n\nec...'zsh'], variant='N-I'), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=839, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -n and backslash escape (line 839)
E           
E           stdout mismatch:
E             expected: "['abcde']\n['a    ']"
E             actual:   "['']\n['']"
E           
E           Expected stdout: "['abcde']\n['a    ']"
E           Actual stdout:   "['']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) exit 99;; esac  # read -n not implemented
E           
E           echo 'a\b\c\d\e\f' | (read -n 5; argv.py "$REPLY")
E           echo 'a\ \ \ \ \ ' | (read -n 5; argv.py "$REPLY")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -n 4 with incomplete backslash[L861]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f70b0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -n 4 with incomplete backslash', script='case $SH in zsh) exit 99;; esac  # read -n not implemente...'zsh'], variant='N-I'), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=861, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -n 4 with incomplete backslash (line 861)
E           
E           stdout mismatch:
E             expected: "['abcd']\n['   x']"
E             actual:   "['']\n['']"
E           
E           Expected stdout: "['abcd']\n['   x']"
E           Actual stdout:   "['']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: [abc]: command not found\nbash: [   ]: command not found\nbash: [abc]: command not found\nbash: []: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in zsh) exit 99;; esac  # read -n not implemented
E           
E           echo 'abc\def\ghijklmn' | (read -n 4; argv.py "$REPLY")
E           echo '   \xxx\xxxxxxxx' | (read -n 4; argv.py "$REPLY")
E           
E           # bash implements "-n NUM" as number of characters
E           # ash implements "-n NUM" as number of bytes
E           ['abc']
E           ['   ']
E           # mksh implements "-n NUM" as number of bytes, and also "read" (without
E           # variable names) in mksh is equivalent to "read REPLY, i.e., consideres IFS.
E           ['abc']
E           ['']
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read -n 4 with backslash + delim[L887]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7170>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read -n 4 with backslash + delim', script='case $SH in zsh) exit 99;; esac  # read -n not implemented\n...'zsh'], variant='N-I'), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=887, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read -n 4 with backslash + delim (line 887)
E           
E           stdout mismatch:
E             expected: "['abcd']"
E             actual:   "['']"
E           
E           Expected stdout: "['abcd']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: [abc]: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in zsh) exit 99;; esac  # read -n not implemented
E           
E           echo $'abc\\\ndefg' | (read -n 4; argv.py "$REPLY")
E           
E           # mksh and ash implements "-n NUM" as number of bytes.
E           ['abc']
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::"backslash + newline" should be swallowed regardless of "-d <delim>"[L903]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7230>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='"backslash + newline" should be swallowed regardless of "-d <delim>"', script='printf \'%s\\n\' \'a b\\...alue="['a bc d']\n['a b,c d']\n['a bc d\\n']\n['a b,c d\\n']", shells=None, variant=None)], line_number=903, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: "backslash + newline" should be swallowed regardless of "-d <delim>" (line 903)
E           
E           stdout mismatch:
E             expected: "['a bc d']\n['a b,c d']\n['a bc d\\n']\n['a b,c d\\n']"
E             actual:   "['']\n['']\n['']\n['']"
E           
E           Expected stdout: "['a bc d']\n['a b,c d']\n['a bc d\\n']\n['a b,c d\\n']"
E           Actual stdout:   "['']\n['']\n['']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: [a bc d]: command not found\nbash: [a b,c d]: command not found\nbash: [a b\\nc d]: command not found\nbash: [a bc d]: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           printf '%s\n' 'a b\' 'c d' | (read; argv.py "$REPLY")
E           printf '%s\n' 'a b\,c d'   | (read; argv.py "$REPLY")
E           printf '%s\n' 'a b\' 'c d' | (read -d ,; argv.py "$REPLY")
E           printf '%s\n' 'a b\,c d'   | (read -d ,; argv.py "$REPLY")
E           
E           # mksh/zsh swallows "backslash + delim" instead.
E           ['a bc d']
E           ['a b,c d']
E           ['a b\nc d']
E           ['a bc d']
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::IFS='x ' read -a: trailing spaces (unlimited split)[L940]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f73b0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="IFS='x ' read -a: trailing spaces (unlimited split)", script='case $SH in mksh|ash|dash|zsh) exit 99; e... Assertion(type='stdout', value='', shells=['mksh', 'zsh', 'dash', 'ash'], variant='N-I')], line_number=940, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='x ' read -a: trailing spaces (unlimited split) (line 940)
E           
E           stdout mismatch:
E             expected: "['a', 'b']\n['a', 'b']\n['a', 'b']\n['a', 'b']\n['a', 'b']\n['a', 'b']\n['a', 'b', '']"
E             actual:   '[]\n[]\n[]\n[]\n[]\n[]\n[]'
E           
E           Expected stdout: "['a', 'b']\n['a', 'b']\n['a', 'b']\n['a', 'b']\n['a', 'b']\n['a', 'b']\n['a', 'b', '']"
E           Actual stdout:   '[]\n[]\n[]\n[]\n[]\n[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh|ash|dash|zsh) exit 99; esac
E           IFS='x '
E           echo 'a b'     | (read -a a; argv.py "${a[@]}")
E           echo 'a b '    | (read -a a; argv.py "${a[@]}")
E           echo 'a bx'    | (read -a a; argv.py "${a[@]}")
E           echo 'a bx '   | (read -a a; argv.py "${a[@]}")
E           echo 'a b x'   | (read -a a; argv.py "${a[@]}")
E           echo 'a b x '  | (read -a a; argv.py "${a[@]}")
E           echo 'a b x x' | (read -a a; argv.py "${a[@]}")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::IFS='x ' read a b: trailing spaces (with max_split)[L964]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7470>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="IFS='x ' read a b: trailing spaces (with max_split)", script='echo \'hello world  test   \' | (read a b...\n['a', 'ax  x  x']\n['a', 'ax  x  x']\n['a', 'ax  x  x  a']", shells=None, variant=None)], line_number=964, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='x ' read a b: trailing spaces (with max_split) (line 964)
E           
E           stdout mismatch:
E             expected: "['hello', 'world  test']\n-- IFS=x --\n['a', 'ax  x']\n['a', 'ax  x  x']\n['a', 'ax  x  x']\n['a', 'ax  x  x  a']"
E             actual:   "['', '']\n-- IFS=x --\n['', '']\n['', '']\n['', '']\n['', '']"
E           
E           Expected stdout: "['hello', 'world  test']\n-- IFS=x --\n['a', 'ax  x']\n['a', 'ax  x  x']\n['a', 'ax  x  x']\n['a', 'ax  x  x  a']"
E           Actual stdout:   "['', '']\n-- IFS=x --\n['', '']\n['', '']\n['', '']\n['', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo 'hello world  test   ' | (read a b; argv.py "$a" "$b")
E           echo '-- IFS=x --'
E           IFS='x '
E           echo 'a ax  x  '     | (read a b; argv.py "$a" "$b")
E           echo 'a ax  x  x'    | (read a b; argv.py "$a" "$b")
E           echo 'a ax  x  x  '  | (read a b; argv.py "$a" "$b")
E           echo 'a ax  x  x  a' | (read a b; argv.py "$a" "$b")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::IFS='x ' read -a: intermediate spaces (unlimited split)[L981]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7530>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="IFS='x ' read -a: intermediate spaces (unlimited split)", script='case $SH in mksh|ash|dash|zsh) exit 9... Assertion(type='stdout', value='', shells=['mksh', 'zsh', 'dash', 'ash'], variant='N-I')], line_number=981, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='x ' read -a: intermediate spaces (unlimited split) (line 981)
E           
E           stdout mismatch:
E             expected: "['a', 'b']\n['a', '', 'b']\n['a', '', '', 'b']\n['a', '', 'b']\n['a', '', 'b']\n['a', 'b']\n['a', '', 'b']\n['a', '', 'b']\n['a', '', '', 'b']"
E             actual:   '[]\n[]\n[]\n[]\n[]\n[]\n[]\n[]\n[]'
E           
E           Expected stdout: "['a', 'b']\n['a', '', 'b']\n['a', '', '', 'b']\n['a', '', 'b']\n['a', '', 'b']\n['a', 'b']\n['a', '', 'b']\n['a', '', 'b']\n['a', '', '', 'b']"
E           Actual stdout:   '[]\n[]\n[]\n[]\n[]\n[]\n[]\n[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh|ash|dash|zsh) exit 99; esac
E           IFS='x '
E           echo 'a x b'   | (read -a a; argv.py "${a[@]}")
E           echo 'a xx b'  | (read -a a; argv.py "${a[@]}")
E           echo 'a xxx b' | (read -a a; argv.py "${a[@]}")
E           echo 'a x xb'  | (read -a a; argv.py "${a[@]}")
E           echo 'a x x b' | (read -a a; argv.py "${a[@]}")
E           echo 'ax b'    | (read -a a; argv.py "${a[@]}")
E           echo 'ax xb'   | (read -a a; argv.py "${a[@]}")
E           echo 'ax  xb'  | (read -a a; argv.py "${a[@]}")
E           echo 'ax x xb' | (read -a a; argv.py "${a[@]}")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::IFS='x ' incomplete backslash[L1008]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f75f0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="IFS='x ' incomplete backslash", script='echo \' a b \\\' | (read a; argv.py "$a")\necho \' a b \\\' | (...tdout', value="['a b']\n['a', 'b']\n['a', 'ax  x    hello']", shells=None, variant=None)], line_number=1008, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='x ' incomplete backslash (line 1008)
E           
E           stdout mismatch:
E             expected: "['a b']\n['a', 'b']\n['a', 'ax  x    hello']"
E             actual:   "['']\n['', '']\n['', '']"
E           
E           Expected stdout: "['a b']\n['a', 'b']\n['a', 'ax  x    hello']"
E           Actual stdout:   "['']\n['', '']\n['', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo ' a b \' | (read a; argv.py "$a")
E           echo ' a b \' | (read a b; argv.py "$a" "$b")
E           IFS='x '
E           echo $'a ax  x    \\\nhello' | (read a b; argv.py "$a" "$b")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::IFS='\\ ' and backslash escaping[L1019]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f76b0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name="IFS='\\ ' and backslash escaping", script='IFS=\'\\ \'\necho "hello\\ world  test" | (read a b; argv.py...ello', 'world  test']\n['hello', ' world  test']", shells=['mksh', 'zsh'], variant='OK')], line_number=1019, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='\ ' and backslash escaping (line 1019)
E           
E           stdout mismatch:
E             expected: "['hello world', 'test']\n['hello world  test', '']"
E             actual:   "['', '']\n['', '']"
E           
E           Expected stdout: "['hello world', 'test']\n['hello world  test', '']"
E           Actual stdout:   "['', '']\n['', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           IFS='\ '
E           echo "hello\ world  test" | (read a b; argv.py "$a" "$b")
E           IFS='\'
E           echo "hello\ world  test" | (read a b; argv.py "$a" "$b")
E           # In mksh/zsh, IFS='\' is stronger than backslash escaping
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::max_split and backslash escaping[L1034]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7770>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='max_split and backslash escaping', script='echo \'Aa b \\ a\\ b\' | (read a b; argv.py "$a" "$b")\necho...a', 'b  a b']\n['Aa', 'b', ' a b']\n['Aa', 'b', ' a b', '']", shells=None, variant=None)], line_number=1034, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: max_split and backslash escaping (line 1034)
E           
E           stdout mismatch:
E             expected: "['Aa', 'b  a b']\n['Aa', 'b', ' a b']\n['Aa', 'b', ' a b', '']"
E             actual:   "['', '']\n['', '', '']\n['', '', '', '']"
E           
E           Expected stdout: "['Aa', 'b  a b']\n['Aa', 'b', ' a b']\n['Aa', 'b', ' a b', '']"
E           Actual stdout:   "['', '']\n['', '', '']\n['', '', '', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo 'Aa b \ a\ b' | (read a b; argv.py "$a" "$b")
E           echo 'Aa b \ a\ b' | (read a b c; argv.py "$a" "$b" "$c")
E           echo 'Aa b \ a\ b' | (read a b c d; argv.py "$a" "$b" "$c" "$d")
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::IFS=x read a b <<< xxxxxx[L1044]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7830>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='IFS=x read a b <<< xxxxxx', script='IFS=\'x \'\necho x     | (read a b; argv.py "$a" "$b")\necho xx    ..., 'a']\n['', 'a']\n['', 'axx']\n['', 'axxx']\n['', 'axxxx']", shells=None, variant=None)], line_number=1044, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS=x read a b <<< xxxxxx (line 1044)
E           
E           stdout mismatch:
E             expected: "['', '']\n['', '']\n['', 'xx']\n['', 'xxx']\n['', 'xxxx']\n-- spaces --\n['', '']\n['', '']\n['', 'xx']\n['', 'xxx']\n['', 'xxxx']\n-- with char --\n['', 'a']\n['', 'a']\n['', 'axx']\n['', 'axxx']\n['', 'axxxx']"
E             actual:   "['', '']\n['', '']\n['', '']\n['', '']\n['', '']\n-- spaces --\n['', '']\n['', '']\n['', '']\n['', '']\n['', '']\n-- with char --\n['', '']\n['', '']\n['', '']\n['', '']\n['', '']"
E           
E           Expected stdout: "['', '']\n['', '']\n['', 'xx']\n['', 'xxx']\n['', 'xxxx']\n-- spaces --\n['', '']\n['', '']\n['', 'xx']\n['', 'xxx']\n['', 'xxxx']\n-- with char --\n['', 'a']\n['', 'a']\n['', 'axx']\n['', 'axxx']\n['', 'axxxx']"
E           Actual stdout:   "['', '']\n['', '']\n['', '']\n['', '']\n['', '']\n-- spaces --\n['', '']\n['', '']\n['', '']\n['', '']\n['', '']\n-- with char --\n['', '']\n['', '']\n['', '']\n['', '']\n['', '']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: --: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: --: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           IFS='x '
E           echo x     | (read a b; argv.py "$a" "$b")
E           echo xx    | (read a b; argv.py "$a" "$b")
E           echo xxx   | (read a b; argv.py "$a" "$b")
E           echo xxxx  | (read a b; argv.py "$a" "$b")
E           echo xxxxx | (read a b; argv.py "$a" "$b")
E           echo '-- spaces --'
E           echo 'x    ' | (read a b; argv.py "$a" "$b")
E           echo 'xx   ' | (read a b; argv.py "$a" "$b")
E           echo 'xxx  ' | (read a b; argv.py "$a" "$b")
E           echo 'xxxx ' | (read a b; argv.py "$a" "$b")
E           echo 'xxxxx' | (read a b; argv.py "$a" "$b")
E           echo '-- with char --'
E           echo 'xa    ' | (read a b; argv.py "$a" "$b")
E           echo 'xax   ' | (read a b; argv.py "$a" "$b")
E           echo 'xaxx  ' | (read a b; argv.py "$a" "$b")
E           echo 'xaxxx ' | (read a b; argv.py "$a" "$b")
E           echo 'xaxxxx' | (read a b; argv.py "$a" "$b")
E           ['', '']
E           ['', 'x']
E           ['', 'xx']
E           ['', 'xxx']
E           ['', 'xxxx']
E           -- spaces --
E           ['', '']
E           ['', 'x']
E           ['', 'xx']
E           ['', 'xxx']
E           ['', 'xxxx']
E           -- with char --
E           ['', 'a']
E           ['', 'ax']
E           ['', 'axx']
E           ['', 'axxx']
E           ['', 'axxxx']
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read and "\\ "[L1102]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f78f0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read and "\\ "', script='IFS=\'x \'\ncheck() { echo "$1" | (read a b; argv.py "$a" "$b"); }\n\necho \'-...]\n['', ' ']\n['', '  ']\n['', '']\n['', '']\n['', '']", shells=['mksh'], variant='BUG')], line_number=1102, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read and "\ " (line 1102)
E           
E           stdout mismatch:
E             expected: "-- xs... --\n['', '']\n['', ' ']\n['', '  ']\n['', '   ']\n-- xe... --\n['', ' ']\n['', '  ']\n['', '   ']\n['', ' ']\n['', ' ']\n['', ' ']"
E             actual:   "-- xs... --\n['', '']\n['', '']\n['', '']\n['', '']\n-- xe... --\n['', '']\n['', '']\n['', '']\n['', '']\n['', '']\n['', '']"
E           
E           Expected stdout: "-- xs... --\n['', '']\n['', ' ']\n['', '  ']\n['', '   ']\n-- xe... --\n['', ' ']\n['', '  ']\n['', '   ']\n['', ' ']\n['', ' ']\n['', ' ']"
E           Actual stdout:   "-- xs... --\n['', '']\n['', '']\n['', '']\n['', '']\n-- xe... --\n['', '']\n['', '']\n['', '']\n['', '']\n['', '']\n['', '']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: --: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: --: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\nbash: [,: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           IFS='x '
E           check() { echo "$1" | (read a b; argv.py "$a" "$b"); }
E           
E           echo '-- xs... --'
E           check 'x '
E           check 'x \ '
E           check 'x \ \ '
E           check 'x \ \ \ '
E           echo '-- xe... --'
E           check 'x\ '
E           check 'x\ \ '
E           check 'x\ \ \ '
E           check 'x\  '
E           check 'x\  '
E           check 'x\    '
E           
E           # check 'xx\ '
E           # check 'xx\ '
E           
E           -- xs... --
E           ['', '']
E           ['', '']
E           ['', '']
E           ['', '']
E           -- xe... --
E           ['', '']
E           ['', '']
E           ['', '']
E           ['', '']
E           ['', '']
E           ['', '']
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-read.test.sh::read bash bug[L1166]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f79b0>
test_file = 'builtin-read.test.sh'
test_case = TestCase(name='read bash bug', script='IFS=\'x \'\necho \'x\\  \\ \' | (read a b; argv.py "$a" "$b")\n[\'\', \'\']\n[\...ant='BUG'), Assertion(type='stdout', value="['', '  ']", shells=['mksh'], variant='BUG')], line_number=1166, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: read bash bug (line 1166)
E           
E           stdout mismatch:
E             expected: "['', '\\x01']"
E             actual:   "['', '']"
E           
E           Expected stdout: "['', '\\x01']"
E           Actual stdout:   "['', '']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: [,: command not found\nbash: [,: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           IFS='x '
E           echo 'x\  \ ' | (read a b; argv.py "$a" "$b")
E           ['', '']
E           ['', ' ']
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set -u with undefined variable exits the interpreter[L44]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7e30>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set -u with undefined variable exits the interpreter', script="# non-interactive\n$SH -c 'set -u; echo ...tions=[Assertion(type='stdout', value='before\nOK\nbefore\nOK', shells=None, variant=None)], line_number=44, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set -u with undefined variable exits the interpreter (line 44)
E           
E           stdout mismatch:
E             expected: 'before\nOK\nbefore\nOK'
E             actual:   'before\nOK\nOK'
E           
E           Expected stdout: 'before\nOK\nbefore\nOK'
E           Actual stdout:   'before\nOK\nOK\n'
E           Expected stderr: None
E           Actual stderr:   'bash: x: unbound variable\nbash: -i: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # non-interactive
E           $SH -c 'set -u; echo before; echo $x; echo after'
E           if test $? -ne 0; then
E             echo OK
E           fi
E           
E           # interactive
E           $SH -i -c 'set -u; echo before; echo $x; echo after'
E           if test $? -ne 0; then
E             echo OK
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set -u with undefined var in interactive shell does NOT exit the interpreter[L66]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7ef0>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set -u with undefined var in interactive shell does NOT exit the interpreter', script="# In bash, it ab...e='stdout', value='before\nOK\nbefore\nOK', shells=['dash', 'mksh', 'zsh'], variant='BUG')], line_number=66, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set -u with undefined var in interactive shell does NOT exit the interpreter (line 66)
E           
E           stdout mismatch:
E             expected: 'before\nOK\nbefore\nline2'
E             actual:   'before\nOK\nOK'
E           
E           Expected stdout: 'before\nOK\nbefore\nline2'
E           Actual stdout:   'before\nOK\nOK\n'
E           Expected stderr: None
E           Actual stderr:   'bash: x: unbound variable\nbash: -i: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # In bash, it aborts the LINE only.  The next line is executed!
E           
E           # non-interactive
E           $SH -c 'set -u; echo before; echo $x; echo after
E           echo line2
E           '
E           if test $? -ne 0; then
E             echo OK
E           fi
E           
E           # interactive
E           $SH -i -c 'set -u; echo before; echo $x; echo after
E           echo line2
E           '
E           if test $? -ne 0; then
E             echo OK
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set -u error can break out of nested evals[L101]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1091f7fb0>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set -u error can break out of nested evals', script='$SH -c \'\nset -u\ntest_function_2() {\n  x=$blarg...), Assertion(type='stdout', value='before\nafter', shells=['zsh', 'mksh'], variant='BUG')], line_number=101, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set -u error can break out of nested evals (line 101)
E           
E           Execution error: Expected compound command as function body at line 1, column 27
E           
E           
E           Script:
E           ---
E           $SH -c '
E           set -u
E           test_function_2() {
E             x=$blarg
E           }
E           test_function() {
E             eval "test_function_2"
E           }
E           
E           echo before
E           eval test_function
E           echo after
E           '
E           # status must be non-zero: bash uses 1, ash/dash exit 2
E           if test $? -ne 0; then
E             echo OK
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set -o lists options[L152]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c2f0>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set -o lists options', script="# NOTE: osh doesn't use the same format yet.\nset -o | grep -o noexec", assertions=[Assertion(type='stdout', value='noexec', shells=None, variant=None)], line_number=152, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set -o lists options (line 152)
E           
E           stdout mismatch:
E             expected: 'noexec'
E             actual:   ''
E           
E           Expected stdout: 'noexec'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # NOTE: osh doesn't use the same format yet.
E           set -o | grep -o noexec
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::'set' and 'eval' round trip[L159]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c3b0>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name="'set' and 'eval' round trip", script='# NOTE: not testing arrays and associative arrays!\n_space=\'[ ]\...h'], variant='BUG'), Assertion(type='stdout', value='[ ]', shells=['zsh'], variant='BUG')], line_number=159, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 'set' and 'eval' round trip (line 159)
E           
E           stdout mismatch:
E             expected: '[ ]\nOK\nOK\nOK\nOK\nOK'
E             actual:   '[ _unicodespace _unicodewhitespace _unicodesq _unicodebackslash_dq _unicodeunicode ]'
E           
E           Expected stdout: '[ ]\nOK\nOK\nOK\nOK\nOK'
E           Actual stdout:   '[ _unicodespace _unicodewhitespace _unicodesq _unicodebackslash_dq _unicodeunicode ]\n'
E           Expected stderr: None
E           Actual stderr:   'bash: quotes: command not found\nCode saved to /tmp/vars-bash.txt\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # NOTE: not testing arrays and associative arrays!
E           _space='[ ]'
E           _whitespace=$'[\t\r\n]'
E           _sq="'single quotes'"
E           _backslash_dq="\\ \""
E           _unicode=$'[\u03bc]'
E           
E           # Save the variables
E           varfile=$TMP/vars-$(basename $SH).txt
E           
E           set | grep '^_' > "$varfile"
E           
E           # Unset variables
E           unset _space _whitespace _sq _backslash_dq _unicode
E           echo [ $_space $_whitespace $_sq $_backslash_dq $_unicode ]
E           
E           # Restore them
E           
E           . $varfile
E           echo "Code saved to $varfile" 1>&2  # for debugging
E           
E           test "$_space" = '[ ]' && echo OK
E           test "$_whitespace" = $'[\t\r\n]' && echo OK
E           test "$_sq" = "'single quotes'" && echo OK
E           test "$_backslash_dq" = "\\ \"" && echo OK
E           test "$_unicode" = $'[\u03bc]' && echo OK
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set - - and so forth[L202]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c470>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set - - and so forth', script='set a b\necho "$@"\n\nset - a b\necho "$@"\n\nset -- a b\necho "$@"\n\ns...I'), Assertion(type='stdout', value='a b\na b\na b\n\n--', shells=['zsh'], variant='BUG')], line_number=202, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set - - and so forth (line 202)
E           
E           stdout mismatch:
E             expected: 'a b\na b\na b\n-\n--'
E             actual:   'a b\n- a b\na b\n- -\n--'
E           
E           Expected stdout: 'a b\na b\na b\n-\n--'
E           Actual stdout:   'a b\n- a b\na b\n- -\n--\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set a b
E           echo "$@"
E           
E           set - a b
E           echo "$@"
E           
E           set -- a b
E           echo "$@"
E           
E           set - -
E           echo "$@"
E           
E           set -- --
E           echo "$@"
E           
E           # note: zsh is different, and yash is totally different
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set - leading single dash is ignored, turns off xtrace verbose (#2364)[L241]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c530>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set - leading single dash is ignored, turns off xtrace verbose (#2364)', script='show_options() {\n  ca...on\nxtrace-on\n\na b c\nverbose-on\nxtrace-on\n\nx - y z', shells=['zsh'], variant='BUG')], line_number=241, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set - leading single dash is ignored, turns off xtrace verbose (#2364) (line 241)
E           
E           stdout mismatch:
E             expected: 'verbose-on\nxtrace-on\n\na b c\n\nx - y z'
E             actual:   '\n- a b c\n\nx - y z'
E           
E           Expected stdout: 'verbose-on\nxtrace-on\n\na b c\n\nx - y z'
E           Actual stdout:   '\n- a b c\n\nx - y z\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_options() {
E             case $- in
E               *v*) echo verbose-on ;;
E             esac
E             case $- in
E               *x*) echo xtrace-on ;;
E             esac
E           }
E           
E           set -x -v
E           show_options
E           echo
E           
E           set - a b c
E           echo "$@"
E           show_options
E           echo
E           
E           # dash that's not leading is not special
E           set x - y z
E           echo "$@"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set - stops option processing like set --[L285]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c5f0>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set - stops option processing like set --', script='case $SH in zsh) exit ;; esac\n\nshow_options() {\n...ls=None, variant=None), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=285, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set - stops option processing like set -- (line 285)
E           
E           stdout mismatch:
E             expected: 'argv -v'
E             actual:   'argv - -v'
E           
E           Expected stdout: 'argv -v'
E           Actual stdout:   'argv - -v\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) exit ;; esac
E           
E           show_options() {
E             case $- in
E               *v*) echo verbose-on ;;
E             esac
E             case $- in
E               *x*) echo xtrace-on ;;
E             esac
E           }
E           
E           set -x - -v
E           
E           show_options
E           echo argv "$@"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::A single + is an ignored flag; not an argument[L309]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c6b0>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='A single + is an ignored flag; not an argument', script='case $SH in zsh) exit ;; esac\n\nshow_options(...mksh'], variant='BUG'), Assertion(type='stdout', value='', shells=['zsh'], variant='N-I')], line_number=309, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: A single + is an ignored flag; not an argument (line 309)
E           
E           stdout mismatch:
E             expected: 'plus\nverbose-on\nxtrace-on\nplus x y'
E             actual:   'plus +\nplus + -v x y'
E           
E           Expected stdout: 'plus\nverbose-on\nxtrace-on\nplus x y'
E           Actual stdout:   'plus +\nplus + -v x y\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) exit ;; esac
E           
E           show_options() {
E             case $- in
E               *v*) echo verbose-on ;;
E             esac
E             case $- in
E               *x*) echo xtrace-on ;;
E             esac
E           }
E           
E           set +
E           echo plus "$@"
E           
E           set -x + -v x y
E           show_options
E           echo plus "$@"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set - + and + -[L344]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c770>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set - + and + -', script='set - +\necho "$@"\n\nset + -\necho "$@"', assertions=[Assertion(type='stdout...ariant='BUG'), Assertion(type='stdout', value='+\n', shells=['zsh', 'osh'], variant='OK')], line_number=344, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set - + and + - (line 344)
E           
E           stdout mismatch:
E             expected: '+\n+'
E             actual:   '- +\n+ -'
E           
E           Expected stdout: '+\n+'
E           Actual stdout:   '- +\n+ -\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set - +
E           echo "$@"
E           
E           set + -
E           echo "$@"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set +a stops exporting[L376]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c8f0>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set +a stops exporting', script='set -a\nFOO=exported\nset +a\nBAR=not_exported\nprintenv.py FOO BAR', assertions=[Assertion(type='stdout', value='exported\nNone', shells=None, variant=None)], line_number=376, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set +a stops exporting (line 376)
E           
E           stdout mismatch:
E             expected: 'exported\nNone'
E             actual:   'exported\nnot_exported'
E           
E           Expected stdout: 'exported\nNone'
E           Actual stdout:   'exported\nnot_exported\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -a: invalid option\nbash: set: -a: invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -a
E           FOO=exported
E           set +a
E           BAR=not_exported
E           printenv.py FOO BAR
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::set -o allexport (long form)[L387]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921c9b0>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='set -o allexport (long form)', script='set -o allexport\nVAR1=value1\nset +o allexport\nVAR2=value2\npr...2', assertions=[Assertion(type='stdout', value='value1\nNone', shells=None, variant=None)], line_number=387, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set -o allexport (long form) (line 387)
E           
E           stdout mismatch:
E             expected: 'value1\nNone'
E             actual:   'value1\nvalue2'
E           
E           Expected stdout: 'value1\nNone'
E           Actual stdout:   'value1\nvalue2\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: allexport: invalid option name\nbash: set: allexport: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o allexport
E           VAR1=value1
E           set +o allexport
E           VAR2=value2
E           printenv.py VAR1 VAR2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-set.test.sh::variables set before set -a are not exported[L398]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921ca70>
test_file = 'builtin-set.test.sh'
test_case = TestCase(name='variables set before set -a are not exported', script='BEFORE=before_value\nset -a\nAFTER=after_value\n...ssertions=[Assertion(type='stdout', value='None\nafter_value', shells=None, variant=None)], line_number=398, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: variables set before set -a are not exported (line 398)
E           
E           stdout mismatch:
E             expected: 'None\nafter_value'
E             actual:   'before_value\nafter_value'
E           
E           Expected stdout: 'None\nafter_value'
E           Actual stdout:   'before_value\nafter_value\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -a: invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           BEFORE=before_value
E           set -a
E           AFTER=after_value
E           printenv.py BEFORE AFTER
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-special.test.sh::Prefix assignments persist after special builtins, like : (set -o posix)[L30]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921cd70>
test_file = 'builtin-special.test.sh'
test_case = TestCase(name='Prefix assignments persist after special builtins, like : (set -o posix)', script="case $SH in\n  bash)..., variant=None), Assertion(type='stdout', value='foo=\nz=', shells=['zsh'], variant='BUG')], line_number=30, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Prefix assignments persist after special builtins, like : (set -o posix) (line 30)
E           
E           stdout mismatch:
E             expected: 'foo=bar\nz='
E             actual:   'foo=\nz='
E           
E           Expected stdout: 'foo=bar\nz='
E           Actual stdout:   'foo=\nz=\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: posix: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in
E             bash) set -o posix ;;
E           esac
E           
E           foo=bar :
E           echo foo=$foo
E           
E           # Not true when you use 'builtin'
E           z=Z builtin :
E           echo z=$Z
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-special.test.sh::Prefix assignments persist after readonly, but NOT exported (set -o posix)[L52]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921ce30>
test_file = 'builtin-special.test.sh'
test_case = TestCase(name='Prefix assignments persist after readonly, but NOT exported (set -o posix)', script='# Bash only implem...e='stdout', value='foo=bar\nspam=eggs\nbar\nNone', shells=['bash', 'yash'], variant='BUG')], line_number=52, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Prefix assignments persist after readonly, but NOT exported (set -o posix) (line 52)
E           
E           stdout mismatch:
E             expected: 'foo=bar\nspam=eggs\nbar\nNone'
E             actual:   'foo=\nspam=eggs\nNone\neggs'
E           
E           Expected stdout: 'foo=bar\nspam=eggs\nbar\nNone'
E           Actual stdout:   'foo=\nspam=eggs\nNone\neggs\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: posix: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Bash only implements it behind the posix option
E           case $SH in
E             bash) set -o posix ;;
E           esac
E           foo=bar readonly spam=eggs
E           echo foo=$foo
E           echo spam=$spam
E           
E           # should NOT be exported
E           printenv.py foo
E           printenv.py spam
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-special.test.sh::Prefix binding for exec is a special case (versus e.g. readonly)[L79]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921cef0>
test_file = 'builtin-special.test.sh'
test_case = TestCase(name='Prefix binding for exec is a special case (versus e.g. readonly)', script="pre1=pre1 readonly x=x\npre2..., Assertion(type='stdout', value='pre1=pre1 x= pre2=pre2', shells=['yash'], variant='BUG')], line_number=79, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Prefix binding for exec is a special case (versus e.g. readonly) (line 79)
E           
E           stdout mismatch:
E             expected: 'pre1= x= pre2=pre2'
E             actual:   'pre1= x=x pre2=pre2'
E           
E           Expected stdout: 'pre1= x= pre2=pre2'
E           Actual stdout:   'pre1= x=x pre2=pre2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           pre1=pre1 readonly x=x
E           pre2=pre2 exec sh -c 'echo pre1=$pre1 x=$x pre2=$pre2'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-special.test.sh::exec without args is a special case of the special case in some shells[L91]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921cfb0>
test_file = 'builtin-special.test.sh'
test_case = TestCase(name='exec without args is a special case of the special case in some shells', script='FOO=bar exec >& 2\nech...tion(type='stderr', value='FOO=bar', shells=['dash', 'mksh', 'ash', 'yash'], variant='OK')], line_number=91, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: exec without args is a special case of the special case in some shells (line 91)
E           
E           stderr mismatch:
E             expected: 'FOO='
E             actual:   ''
E           
E           Expected stdout: None
E           Actual stdout:   'FOO=\n'
E           Expected stderr: 'FOO='
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           FOO=bar exec >& 2
E           echo FOO=$FOO
E           #declare -p | grep FOO
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-special.test.sh::Special builtins can't be redefined as shell functions (set -o posix)[L129]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921d130>
test_file = 'builtin-special.test.sh'
test_case = TestCase(name="Special builtins can't be redefined as shell functions (set -o posix)", script='case $SH in\n  bash) se...G'), Assertion(type='stdout', value='hi\nsh func echo hi', shells=['zsh'], variant='BUG')], line_number=129, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Special builtins can't be redefined as shell functions (set -o posix) (line 129)
E           
E           stdout mismatch:
E             expected: 'hi'
E             actual:   'hi\nsh func echo hi'
E           
E           Expected stdout: 'hi'
E           Actual stdout:   'hi\nsh func echo hi\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: posix: invalid option name\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in
E             bash) set -o posix ;;
E           esac
E           
E           eval 'echo hi'
E           
E           eval() {
E             echo 'sh func' "$@"
E           }
E           
E           eval 'echo hi'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-special.test.sh::bash 'type' gets confused - says 'function', but runs builtin[L228]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921d430>
test_file = 'builtin-special.test.sh'
test_case = TestCase(name="bash 'type' gets confused - says 'function', but runs builtin", script='case $SH in dash|mksh|zsh|ash|y...on(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash', 'yash'], variant='N-I')], line_number=228, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bash 'type' gets confused - says 'function', but runs builtin (line 228)
E           
E           stdout mismatch:
E             expected: 'TRUE\nbuiltin\nfunction\n---\nEVAL\nbuiltin\nshell function: echo before posix\nafter posix\nfunction'
E             actual:   'TRUE\nbuiltin\nfunction\n---\nEVAL\nbuiltin\nshell function: echo before posix\nshell function: echo after posix\nfunction'
E           
E           Expected stdout: 'TRUE\nbuiltin\nfunction\n---\nEVAL\nbuiltin\nshell function: echo before posix\nafter posix\nfunction'
E           Actual stdout:   'TRUE\nbuiltin\nfunction\n---\nEVAL\nbuiltin\nshell function: echo before posix\nshell function: echo after posix\nfunction\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: posix: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh|zsh|ash|yash) exit ;; esac
E           
E           echo TRUE
E           type -t true  # builtin
E           true() { echo true func; }
E           type -t true  # now a function
E           echo ---
E           
E           echo EVAL
E           
E           type -t eval  # builtin
E           # define function before set -o posix
E           eval() { echo "shell function: $1"; }
E           # bash runs the FUNCTION, but OSH finds the special builtin
E           # OSH doesn't need set -o posix
E           eval 'echo before posix'
E           
E           if test -n "$BASH_VERSION"; then
E             # this makes the eval definition invisible!
E             set -o posix
E           fi
E           
E           eval 'echo after posix'  # this is the builtin eval
E           # bash claims it's a function, but it's a builtin
E           type -t eval
E           
E           # it finds the function and the special builtin
E           #type -a eval
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-special.test.sh::command, builtin - both can be redefined, not special (regression)[L285]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921d4f0>
test_file = 'builtin-special.test.sh'
test_case = TestCase(name='command, builtin - both can be redefined, not special (regression)', script='case $SH in dash|ash|yash)...=None), Assertion(type='stdout', value='', shells=['dash', 'ash', 'yash'], variant='N-I')], line_number=285, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: command, builtin - both can be redefined, not special (regression) (line 285)
E           
E           stdout mismatch:
E             expected: 'b\nc\nbuiltin-redef echo b\ncommand-redef echo c'
E             actual:   'c\nbuiltin-redef echo b\ncommand-redef echo c'
E           
E           Expected stdout: 'b\nc\nbuiltin-redef echo b\ncommand-redef echo c'
E           Actual stdout:   'c\nbuiltin-redef echo b\ncommand-redef echo c\n'
E           Expected stderr: None
E           Actual stderr:   'bash: builtin: echo: not a shell builtin\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|ash|yash) exit ;; esac
E           
E           builtin echo b
E           command echo c
E           
E           builtin() {
E             echo builtin-redef "$@"
E           }
E           
E           command() {
E             echo command-redef "$@"
E           }
E           
E           builtin echo b
E           command echo c
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -t -> keyword[L27]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921d7f0>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -t -> keyword', script='type -t for time ! fi do {', assertions=[Assertion(type='stdout', value='keyword\nkeyword\nkeyword\nkeyword\nkeyword\nkeyword', shells=None, variant=None)], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -t -> keyword (line 27)
E           
E           Execution error: Expected command after ! at line 1, column 26
E           
E           
E           Script:
E           ---
E           type -t for time ! fi do {
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -t doesn't find non-executable (like command -v)[L67]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921da30>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name="type -t doesn't find non-executable (like command -v)", script='PATH="$TMP:$PATH"\ntouch $TMP/non-execu...'bash'], variant='BUG'), Assertion(type='status', value=0, shells=['bash'], variant='BUG')], line_number=67, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -t doesn't find non-executable (like command -v) (line 67)
E           
E           stdout mismatch:
E             expected: 'file'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'file'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           PATH="$TMP:$PATH"
E           touch $TMP/non-executable
E           type -t non-executable
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -p and -P builtin -> file[L90]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921dbb0>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -p and -P builtin -> file', script='touch /tmp/{mv,tar,grep}\nchmod +x /tmp/{mv,tar,grep}\nPATH=/t...tmp/mv\n/tmp/tar\n/tmp/grep\n--\n/tmp/mv\n/tmp/tar\n/tmp/grep', shells=None, variant=None)], line_number=90, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -p and -P builtin -> file (line 90)
E           
E           stdout mismatch:
E             expected: '/tmp/mv\n/tmp/tar\n/tmp/grep\n--\n/tmp/mv\n/tmp/tar\n/tmp/grep'
E             actual:   'mv\ntar\ngrep\n--\nmv is mv\ntar is tar\ngrep is grep'
E           
E           Expected stdout: '/tmp/mv\n/tmp/tar\n/tmp/grep\n--\n/tmp/mv\n/tmp/tar\n/tmp/grep'
E           Actual stdout:   'mv\ntar\ngrep\n--\nmv is mv\ntar is tar\ngrep is grep\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch /tmp/{mv,tar,grep}
E           chmod +x /tmp/{mv,tar,grep}
E           PATH=/tmp:$PATH
E           
E           type -p mv tar grep
E           echo --
E           type -P mv tar grep
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -a -P gives multiple files[L108]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921dc70>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -a -P gives multiple files', script='touch _tmp/pwd\nchmod +x _tmp/pwd\nPATH="_tmp:/bin"\n\ntype -...sertions=[Assertion(type='stdout', value='_tmp/pwd\n/bin/pwd', shells=None, variant=None)], line_number=108, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -a -P gives multiple files (line 108)
E           
E           stdout mismatch:
E             expected: '_tmp/pwd\n/bin/pwd'
E             actual:   'bash: type: pwd: not found'
E           
E           Expected stdout: '_tmp/pwd\n/bin/pwd'
E           Actual stdout:   'bash: type: pwd: not found\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           touch _tmp/pwd
E           chmod +x _tmp/pwd
E           PATH="_tmp:/bin"
E           
E           type -a -P pwd
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -p builtin -> not found[L121]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921dd30>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -p builtin -> not found', script='type -p FOO BAR NOT_FOUND', assertions=[Assertion(type='status',...shells=None, variant=None), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=121, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -p builtin -> not found (line 121)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'bash: type: FOO: not found\nbash: type: BAR: not found\nbash: type: NOT_FOUND: not found'
E           
E           Expected stdout: ''
E           Actual stdout:   'bash: type: FOO: not found\nbash: type: BAR: not found\nbash: type: NOT_FOUND: not found\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           type -p FOO BAR NOT_FOUND
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -P builtin -> not found[L132]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921deb0>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -P builtin -> not found', script='type -P FOO BAR NOT_FOUND', assertions=[Assertion(type='status',...shells=None, variant=None), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=132, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -P builtin -> not found (line 132)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'bash: type: FOO: not found\nbash: type: BAR: not found\nbash: type: NOT_FOUND: not found'
E           
E           Expected stdout: ''
E           Actual stdout:   'bash: type: FOO: not found\nbash: type: BAR: not found\nbash: type: NOT_FOUND: not found\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           type -P FOO BAR NOT_FOUND
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -P builtin -> not a file[L138]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921df70>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -P builtin -> not a file', script='type -P cd type builtin command', assertions=[Assertion(type='s...shells=None, variant=None), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=138, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -P builtin -> not a file (line 138)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'bash: type: cd: not found\nbash: type: type: not found\nbash: type: builtin: not found\nbash: type: command: not found'
E           
E           Expected stdout: ''
E           Actual stdout:   'bash: type: cd: not found\nbash: type: type: not found\nbash: type: builtin: not found\nbash: type: command: not found\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           type -P cd type builtin command
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -P builtin -> not a file but file found[L144]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921e030>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -P builtin -> not a file but file found', script='touch _tmp/{mv,tar,grep}\nchmod +x _tmp/{mv,tar,...Assertion(type='stdout', value='_tmp/mv\n_tmp/tar\n_tmp/grep', shells=None, variant=None)], line_number=144, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -P builtin -> not a file but file found (line 144)
E           
E           stdout mismatch:
E             expected: '_tmp/mv\n_tmp/tar\n_tmp/grep'
E             actual:   'mv is mv\ntar is tar\ngrep is grep\nbash: type: cd: not found\nbash: type: builtin: not found\nbash: type: command: not found\nbash: type: type: not found'
E           
E           Expected stdout: '_tmp/mv\n_tmp/tar\n_tmp/grep'
E           Actual stdout:   'mv is mv\ntar is tar\ngrep is grep\nbash: type: cd: not found\nbash: type: builtin: not found\nbash: type: command: not found\nbash: type: type: not found\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           touch _tmp/{mv,tar,grep}
E           chmod +x _tmp/{mv,tar,grep}
E           PATH=_tmp:$PATH
E           
E           mv () { ls; }
E           tar () { ls; }
E           grep () { ls; }
E           type -P mv tar grep cd builtin command type
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -f builtin -> function and file exists[L164]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921e1b0>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -f builtin -> function and file exists', script='touch /tmp/{mv,tar,grep}\nchmod +x /tmp/{mv,tar,g...t', value='mv is /tmp/mv\ntar is /tmp/tar\ngrep is /tmp/grep', shells=None, variant=None)], line_number=164, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -f builtin -> function and file exists (line 164)
E           
E           stdout mismatch:
E             expected: 'mv is /tmp/mv\ntar is /tmp/tar\ngrep is /tmp/grep'
E             actual:   'mv is mv\ntar is tar\ngrep is grep'
E           
E           Expected stdout: 'mv is /tmp/mv\ntar is /tmp/tar\ngrep is /tmp/grep'
E           Actual stdout:   'mv is mv\ntar is tar\ngrep is grep\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch /tmp/{mv,tar,grep}
E           chmod +x /tmp/{mv,tar,grep}
E           PATH=/tmp:$PATH
E           
E           mv () { ls; }
E           tar () { ls; }
E           grep () { ls; }
E           type -f mv tar grep
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type prints function source code[L179]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921e270>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type prints function source code', script='f () { echo; }\ntype -a f\necho\n\ntype f', assertions=[Asse...\nf () { echo; }\n\nf is a shell function\nf () { echo; }', shells=['osh'], variant='OK')], line_number=179, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type prints function source code (line 179)
E           
E           stdout mismatch:
E             expected: 'f is a function\nf ()\n{\n    echo\n}\n\nf is a function\nf ()\n{\n    echo\n}'
E             actual:   'f is a function\n\nf is a function'
E           
E           Expected stdout: 'f is a function\nf () \n{ \n    echo\n}\n\nf is a function\nf () \n{ \n    echo\n}'
E           Actual stdout:   'f is a function\n\nf is a function\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f () { echo; }
E           type -a f
E           echo
E           
E           type f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -a -> file[L240]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921e7b0>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -a -> file', script='touch _tmp/date\nchmod +x _tmp/date\nPATH=/bin:_tmp  # control output\n\ntype...n(type='stdout', value='date is /bin/date\ndate is _tmp/date', shells=None, variant=None)], line_number=240, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -a -> file (line 240)
E           
E           stdout mismatch:
E             expected: 'date is /bin/date\ndate is _tmp/date'
E             actual:   'date is date'
E           
E           Expected stdout: 'date is /bin/date\ndate is _tmp/date'
E           Actual stdout:   'date is date\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/date
E           chmod +x _tmp/date
E           PATH=/bin:_tmp  # control output
E           
E           type -a date
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -ap -> file; abbreviated[L252]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921e870>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -ap -> file; abbreviated', script='touch _tmp/date\nchmod +x _tmp/date\nPATH=/bin:_tmp  # control ...rtions=[Assertion(type='stdout', value='/bin/date\n_tmp/date', shells=None, variant=None)], line_number=252, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -ap -> file; abbreviated (line 252)
E           
E           stdout mismatch:
E             expected: '/bin/date\n_tmp/date'
E             actual:   'date'
E           
E           Expected stdout: '/bin/date\n_tmp/date'
E           Actual stdout:   'date\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/date
E           chmod +x _tmp/date
E           PATH=/bin:_tmp  # control output
E           
E           type -ap date
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -a -> builtin and file[L263]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921e930>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -a -> builtin and file', script='touch _tmp/pwd\nchmod +x _tmp/pwd\nPATH=/bin:_tmp  # control outp...ue='pwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd', shells=None, variant=None)], line_number=263, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -a -> builtin and file (line 263)
E           
E           stdout mismatch:
E             expected: 'pwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd'
E             actual:   'pwd is a shell builtin'
E           
E           Expected stdout: 'pwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd'
E           Actual stdout:   'pwd is a shell builtin\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/pwd
E           chmod +x _tmp/pwd
E           PATH=/bin:_tmp  # control output
E           
E           type -a pwd
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -a -> builtin and file and shell function[L275]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921e9f0>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -a -> builtin and file and shell function', script="touch _tmp/pwd\nchmod +x _tmp/pwd\nPATH=/bin:_...--\npwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd', shells=None, variant=None)], line_number=275, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -a -> builtin and file and shell function (line 275)
E           
E           stdout mismatch:
E             expected: 'pwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd\n---\npwd is a function\npwd ()\n{\n    echo function-too\n}\npwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd\n---\npwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd'
E             actual:   'pwd is a shell builtin\n---\npwd is a function\npwd is a shell builtin\n---\npwd is a shell builtin'
E           
E           Expected stdout: 'pwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd\n---\npwd is a function\npwd () \n{ \n    echo function-too\n}\npwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd\n---\npwd is a shell builtin\npwd is /bin/pwd\npwd is _tmp/pwd'
E           Actual stdout:   'pwd is a shell builtin\n---\npwd is a function\npwd is a shell builtin\n---\npwd is a shell builtin\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/pwd
E           chmod +x _tmp/pwd
E           PATH=/bin:_tmp  # control output
E           
E           type -a pwd
E           echo ---
E           
E           pwd ()
E           {
E               echo function-too
E           }
E           
E           osh-normalize() {
E             sed 's/shell function/function/'
E           }
E           
E           type -a pwd | osh-normalize
E           echo ---
E           
E           type -a -f pwd | osh-normalize
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -ap -> builtin and file; doesn't print builtin or function[L316]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921eab0>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name="type -ap -> builtin and file; doesn't print builtin or function", script='touch _tmp/pwd\nchmod +x _tmp...ons=[Assertion(type='stdout', value='/bin/pwd\n_tmp/pwd\n---', shells=None, variant=None)], line_number=316, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -ap -> builtin and file; doesn't print builtin or function (line 316)
E           
E           stdout mismatch:
E             expected: '/bin/pwd\n_tmp/pwd\n---'
E             actual:   '---'
E           
E           Expected stdout: '/bin/pwd\n_tmp/pwd\n---'
E           Actual stdout:   '---\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/pwd
E           chmod +x _tmp/pwd
E           PATH=/bin:_tmp  # control output
E           
E           # Function is also ignored
E           pwd() { echo function-too; }
E           
E           type -ap pwd
E           echo ---
E           
E           type -p pwd
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type-bash.test.sh::type -P does not find directories (regression)[L341]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921ec30>
test_file = 'builtin-type-bash.test.sh'
test_case = TestCase(name='type -P does not find directories (regression)', script='mkdir -p _tmp\nPATH="_tmp:$PATH"\nmkdir _tmp/c...alue='_tmp/cat\nstatus=0\n/usr/bin/cat\nstatus=0', shells=['ash', 'dash'], variant='BUG')], line_number=341, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -P does not find directories (regression) (line 341)
E           
E           stdout mismatch:
E             expected: 'status=1\n/usr/bin/cat\nstatus=0'
E             actual:   'bash: type: _tmp/cat: not found\nstatus=1\ncat is cat\nstatus=0'
E           
E           Expected stdout: 'status=1\n/usr/bin/cat\nstatus=0'
E           Actual stdout:   'bash: type: _tmp/cat: not found\nstatus=1\ncat is cat\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           PATH="_tmp:$PATH"
E           mkdir _tmp/cat
E           
E           type -P _tmp/cat
E           echo status=$?
E           type -P cat
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type.test.sh::type -> alias external[L17]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921edb0>
test_file = 'builtin-type.test.sh'
test_case = TestCase(name='type -> alias external', script='mkdir -p _tmp\nshopt -s expand_aliases || true  # bash\n\nalias ll=\'l...n alias for ls -l\ndate is a tracked alias for _tmp/date', shells=['mksh'], variant='BUG')], line_number=17, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -> alias external (line 17)
E           
E           Execution error: Expected '}' to close command group at line 13, column 245
E           
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           shopt -s expand_aliases || true  # bash
E           
E           alias ll='ls -l'
E           
E           touch _tmp/date
E           chmod +x _tmp/date
E           PATH=_tmp:/bin
E           
E           normalize() {
E             # ignore quotes and backticks
E             # bash prints a left backtick
E             quotes='"`'\'
E             sed \
E               -e "s/[$quotes]//g" \
E               -e 's/shell function/function/' \
E               -e 's/is aliased to/is an alias for/'
E           }
E           
E           type ll date | normalize
E           
E           # Note: both procs and funcs go in var namespace?  So they don't respond to
E           # 'type'?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type.test.sh::type of relative path[L51]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921ee70>
test_file = 'builtin-type.test.sh'
test_case = TestCase(name='type of relative path', script="mkdir -p _tmp\ntouch _tmp/file _tmp/ex\nchmod +x _tmp/ex\n\ntype _tmp/f...value='_tmp/file is _tmp/file\n_tmp/ex is _tmp/ex', shells=['dash', 'ash'], variant='BUG')], line_number=51, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type of relative path (line 51)
E           
E           stdout mismatch:
E             expected: '_tmp/ex is _tmp/ex'
E             actual:   'bash: type: _tmp/file: not found\nbash: type: _tmp/ex: not found'
E           
E           Expected stdout: '_tmp/ex is _tmp/ex'
E           Actual stdout:   'bash: type: _tmp/file: not found\nbash: type: _tmp/ex: not found\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           touch _tmp/file _tmp/ex
E           chmod +x _tmp/ex
E           
E           type _tmp/file _tmp/ex
E           
E           # dash and ash don't care if it's executable
E           # mksh
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-type.test.sh::type -> not found[L77]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921ef30>
test_file = 'builtin-type.test.sh'
test_case = TestCase(name='type -> not found', script="type zz 2>err.txt\necho status=$?\n\n# for bash and OSH: print to stderr\nf... variant='BUG'), Assertion(type='stderr', value='', shells=['dash', 'ash'], variant='BUG')], line_number=77, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: type -> not found (line 77)
E           
E           stdout mismatch:
E             expected: 'status=1\nzz: not found'
E             actual:   'bash: type: zz: not found\nstatus=1'
E           
E           Expected stdout: 'status=1\nzz: not found'
E           Actual stdout:   'bash: type: zz: not found\nstatus=1\n'
E           Expected stderr: ''
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type zz 2>err.txt
E           echo status=$?
E           
E           # for bash and OSH: print to stderr
E           fgrep -o 'zz: not found' err.txt || true
E           
E           # zsh and mksh behave the same - status 1
E           # dash and ash behave the same - status 127
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Export sets a global variable that persists after export -n[L19]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921f230>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Export sets a global variable that persists after export -n', script='f() { export GLOBAL=X; }\nf\necho...ariant='N-I'), Assertion(type='stdout', value='X\nX\nX\nX', shells=['zsh'], variant='N-I')], line_number=19, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Export sets a global variable that persists after export -n (line 19)
E           
E           stdout mismatch:
E             expected: 'X\nX\nX\nNone'
E             actual:   'X\nX\nX\nX'
E           
E           Expected stdout: 'X\nX\nX\nNone'
E           Actual stdout:   'X\nX\nX\nX\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() { export GLOBAL=X; }
E           f
E           echo $GLOBAL
E           printenv.py GLOBAL
E           export -n GLOBAL
E           echo $GLOBAL
E           printenv.py GLOBAL
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Export a local that shadows a global[L116]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921f6b0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Export a local that shadows a global', script="V=global\nf() {\n  local V=local1\n  export V\n  printen...rtions=[Assertion(type='stdout', value='local1\nNone\nglobal', shells=None, variant=None)], line_number=116, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Export a local that shadows a global (line 116)
E           
E           stdout mismatch:
E             expected: 'local1\nNone\nglobal'
E             actual:   'local1\nglobal\nglobal'
E           
E           Expected stdout: 'local1\nNone\nglobal'
E           Actual stdout:   'local1\nglobal\nglobal\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           V=global
E           f() {
E             local V=local1
E             export V
E             printenv.py V
E           }
E           f
E           printenv.py V  # exported local out of scope; global isn't exported yet
E           export V
E           printenv.py V  # now it's exported
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Unset exported variable, then define it again.  It's NOT still exported.[L139]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921f830>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name="Unset exported variable, then define it again.  It's NOT still exported.", script='export U\nU=u\nprint...ons=[Assertion(type='stdout', value='u\nNone\nnewvalue\nNone', shells=None, variant=None)], line_number=139, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unset exported variable, then define it again.  It's NOT still exported. (line 139)
E           
E           stdout mismatch:
E             expected: 'u\nNone\nnewvalue\nNone'
E             actual:   'u\nNone\nnewvalue\nnewvalue'
E           
E           Expected stdout: 'u\nNone\nnewvalue\nNone'
E           Actual stdout:   'u\nNone\nnewvalue\nnewvalue\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           export U
E           U=u
E           printenv.py U
E           unset U
E           printenv.py U
E           U=newvalue
E           echo $U
E           printenv.py U
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Exporting a parent func variable (dynamic scope)[L155]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921f8f0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Exporting a parent func variable (dynamic scope)', script='# The algorithm is to walk up the stack and ...out', value='before inner\nNone\ninner: X\nX\nafter inner\nX', shells=None, variant=None)], line_number=155, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Exporting a parent func variable (dynamic scope) (line 155)
E           
E           stdout mismatch:
E             expected: 'before inner\nNone\ninner: X\nX\nafter inner\nX'
E             actual:   'before inner\nX\ninner: X\nX\nafter inner\nX'
E           
E           Expected stdout: 'before inner\nNone\ninner: X\nX\nafter inner\nX'
E           Actual stdout:   'before inner\nX\ninner: X\nX\nafter inner\nX\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # The algorithm is to walk up the stack and export that one.
E           inner() {
E             export outer_var
E             echo "inner: $outer_var"
E             printenv.py outer_var
E           }
E           outer() {
E             local outer_var=X
E             echo "before inner"
E             printenv.py outer_var
E             inner
E             echo "after inner"
E             printenv.py outer_var
E           }
E           outer
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::can't export array (strict_array)[L193]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921fb30>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name="can't export array (strict_array)", script='shopt -s strict_array\n\ntypeset -a a\na=(1 2 3)\n\nexport ...h'], variant='OK'), Assertion(type='stdout-json', value='', shells=['osh'], variant='OK')], line_number=193, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: can't export array (strict_array) (line 193)
E           
E           stdout mismatch:
E             expected: 'None'
E             actual:   ''
E           
E           Expected stdout: 'None'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s strict_array
E           
E           typeset -a a
E           a=(1 2 3)
E           
E           export a
E           printenv.py a
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::can't export associative array (strict_array)[L213]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921fbf0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name="can't export associative array (strict_array)", script='shopt -s strict_array\n\ntypeset -A a\na["foo"]...h'], variant='OK'), Assertion(type='stdout-json', value='', shells=['osh'], variant='OK')], line_number=213, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: can't export associative array (strict_array) (line 213)
E           
E           stdout mismatch:
E             expected: 'None'
E             actual:   ''
E           
E           Expected stdout: 'None'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s strict_array
E           
E           typeset -A a
E           a["foo"]=bar
E           
E           export a
E           printenv.py a
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::assign to readonly variable[L230]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921fcb0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='assign to readonly variable', script='# bash doesn\'t abort unless errexit!\nreadonly foo=bar\nfoo=eggs... variant='BUG'), Assertion(type='status', value=2, shells=['dash', 'mksh'], variant='OK')], line_number=230, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assign to readonly variable (line 230)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash doesn't abort unless errexit!
E           readonly foo=bar
E           foo=eggs
E           echo "status=$?"  # nothing happens
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Make an existing local variable readonly[L241]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921fd70>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Make an existing local variable readonly', script="f() {\n\tlocal x=local\n\treadonly x\n\techo $x\n\te...ant='OK'), Assertion(type='stdout', value='local\nglobal', shells=['mksh'], variant='OK')], line_number=241, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Make an existing local variable readonly (line 241)
E           
E           stdout mismatch:
E             expected: 'local'
E             actual:   'local\nstatus=0\nglobal'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: 'local'
E           Actual stdout:   'local\nstatus=0\nglobal\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E           	local x=local
E           	readonly x
E           	echo $x
E           	eval 'x=bar'  # Wrap in eval so it's not fatal
E           	echo status=$?
E           }
E           x=global
E           f
E           echo $x
E           # just-bash treats readonly assignment as fatal (matches dash)
E           
E           # mksh aborts the function, weird
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::assign to readonly variable - errexit[L273]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10921fe30>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='assign to readonly variable - errexit', script='set -o errexit\nreadonly foo=bar\nfoo=eggs\necho "statu..., variant=None), Assertion(type='status', value=2, shells=['dash', 'mksh'], variant='OK')], line_number=273, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assign to readonly variable - errexit (line 273)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o errexit
E           readonly foo=bar
E           foo=eggs
E           echo "status=$?"  # nothing happens
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Unset a function without -f[L314]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240230>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Unset a function without -f', script='f() {\n  echo foo\n}\nf\nunset f\nf', assertions=[Assertion(type=...Assertion(type='stdout', value='foo\nfoo', shells=['dash', 'mksh', 'zsh'], variant='N-I')], line_number=314, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unset a function without -f (line 314)
E           
E           stdout mismatch:
E             expected: 'foo'
E             actual:   'foo\nfoo'
E           status mismatch: expected 127, got 0
E           
E           Expected stdout: 'foo'
E           Actual stdout:   'foo\nfoo\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 127
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo foo
E           }
E           f
E           unset f
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Unset and scope (bug #653)[L342]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092403b0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Unset and scope (bug #653)', script='unlocal() { unset "$@"; }\n\nlevel2() {\n  local hello=yy\n\n  ech...2=\nlevel1=xx\nlevel1=\nlevel2=yy\nlevel2=', shells=['dash', 'ash', 'zsh'], variant='OK')], line_number=342, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unset and scope (bug #653) (line 342)
E           
E           stdout mismatch:
E             expected: 'level2=yy\nlevel2=xx\nlevel1=xx\nlevel1=global\nlevel2=yy\nlevel2=global'
E             actual:   'level2=yy\nlevel2=\nlevel1=xx\nlevel1=\nlevel2=yy\nlevel2='
E           
E           Expected stdout: 'level2=yy\nlevel2=xx\nlevel1=xx\nlevel1=global\nlevel2=yy\nlevel2=global'
E           Actual stdout:   'level2=yy\nlevel2=\nlevel1=xx\nlevel1=\nlevel2=yy\nlevel2=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           unlocal() { unset "$@"; }
E           
E           level2() {
E             local hello=yy
E           
E             echo level2=$hello
E             unlocal hello
E             echo level2=$hello
E           }
E           
E           level1() {
E             local hello=xx
E           
E             level2
E           
E             echo level1=$hello
E             unlocal hello
E             echo level1=$hello
E           
E             level2
E           }
E           
E           hello=global
E           level1
E           
E           # bash, mksh, yash agree here.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Unset invalid variable name[L408]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240530>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Unset invalid variable name', script='unset %\necho status=$?\n# dash does a hard failure!', assertions...['dash'], variant='OK'), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=408, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unset invalid variable name (line 408)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           unset %
E           echo status=$?
E           # dash does a hard failure!
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Unset -f[L444]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240770>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Unset -f', script='foo() {\n  echo "function foo"\n}\nfoo=bar\nunset -f foo\necho foo=$foo\nfoo\necho s...ertions=[Assertion(type='stdout', value='foo=bar\nstatus=127', shells=None, variant=None)], line_number=444, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unset -f (line 444)
E           
E           stdout mismatch:
E             expected: 'foo=bar\nstatus=127'
E             actual:   'foo=bar\nstatus=1'
E           
E           Expected stdout: 'foo=bar\nstatus=127'
E           Actual stdout:   'foo=bar\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: foo: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo() {
E             echo "function foo"
E           }
E           foo=bar
E           unset -f foo
E           echo foo=$foo
E           foo
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Unset wrong type[L495]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092409b0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Unset wrong type', script='case $SH in mksh) exit ;; esac\n\ndeclare undef\nunset -v \'undef[1]\'\necho...mksh'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=495, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unset wrong type (line 495)
E           
E           stdout mismatch:
E             expected: 'undef 1\nundef 1\narray 0\narray 1\nassoc 0\nassoc 0'
E             actual:   'undef 0\nundef 0\narray 0\narray 0\nassoc 0\nassoc 0'
E           
E           Expected stdout: 'undef 1\nundef 1\narray 0\narray 1\nassoc 0\nassoc 0'
E           Actual stdout:   'undef 0\nundef 0\narray 0\narray 0\nassoc 0\nassoc 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           declare undef
E           unset -v 'undef[1]'
E           echo undef $?
E           unset -v 'undef["key"]'
E           echo undef $?
E           
E           declare a=(one two)
E           unset -v 'a[1]'
E           echo array $?
E           
E           #shopt -s strict_arith || true
E           # In OSH, the string 'key' is converted to an integer, which is 0, unless
E           # strict_arith is on, when it fails.
E           unset -v 'a["key"]'
E           echo array $?
E           
E           declare -A A=(['key']=val)
E           unset -v 'A[1]'
E           echo assoc $?
E           unset -v 'A["key"]'
E           echo assoc $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::unset -v assoc (related to issue #661)[L548]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240a70>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='unset -v assoc (related to issue #661)', script='case $SH in dash|mksh|zsh) return ;; esac\n\ndeclare -...), Assertion(type='stdout-json', value='', shells=['dash', 'mksh', 'zsh'], variant='N-I')], line_number=548, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unset -v assoc (related to issue #661) (line 548)
E           
E           stdout mismatch:
E             expected: '1\nkeys=1],a[1\nvals=foo\n0\nkeys=\nvals='
E             actual:   '1\nkeys="$key"\nvals=foo\n0\nkeys=\nvals='
E           
E           Expected stdout: '1\nkeys=1],a[1\nvals=foo\n0\nkeys=\nvals='
E           Actual stdout:   '1\nkeys="$key"\nvals=foo\n0\nkeys=\nvals=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh|zsh) return ;; esac
E           
E           declare -A dict=()
E           key=1],a[1
E           dict["$key"]=foo
E           echo ${#dict[@]}
E           echo keys=${!dict[@]}
E           echo vals=${dict[@]}
E           
E           unset -v 'dict["$key"]'
E           echo ${#dict[@]}
E           echo keys=${!dict[@]}
E           echo vals=${dict[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Unset array member with dynamic parsing[L587]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240bf0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Unset array member with dynamic parsing', script='i=1\na=(w x y z)\nunset \'a[ i - 1 ]\' a[i+1]  # note...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='N-I')], line_number=587, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unset array member with dynamic parsing (line 587)
E           
E           stdout mismatch:
E             expected: 'status=0\nx z len=2'
E             actual:   'status=0\nw x y z len=4'
E           
E           Expected stdout: 'status=0\nx z len=2'
E           Actual stdout:   'status=0\nw x y z len=4\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=1
E           a=(w x y z)
E           unset 'a[ i - 1 ]' a[i+1]  # note: can't have space between a and [
E           echo status=$?
E           echo "${a[@]}" len="${#a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Use local twice[L602]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240cb0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Use local twice', script='f() {\n  local foo=bar\n  local foo\n  echo $foo\n}\nf', assertions=[Assertio...iant=None), Assertion(type='stdout', value='foo=bar\nbar', shells=['zsh'], variant='BUG')], line_number=602, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Use local twice (line 602)
E           
E           stdout mismatch:
E             expected: 'bar'
E             actual:   ''
E           
E           Expected stdout: 'bar'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             local foo=bar
E             local foo
E             echo $foo
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::Local without variable is still unset![L615]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240d70>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='Local without variable is still unset!', script='set -o nounset\nf() {\n  local foo\n  echo "[$foo]"\n}...['zsh'], variant='BUG'), Assertion(type='status', value=0, shells=['zsh'], variant='BUG')], line_number=615, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Local without variable is still unset! (line 615)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '[]'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o nounset
E           f() {
E             local foo
E             echo "[$foo]"
E           }
E           f
E           # zsh doesn't support nounset?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::local after readonly[L629]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240e30>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='local after readonly', script='f() {\n  readonly y\n  local x=1 y=$(( x ))\n  echo y=$y\n}\nf\necho y=$... variant='BUG'), Assertion(type='stdout', value='y=\ny=', shells=['bash'], variant='BUG')], line_number=629, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: local after readonly (line 629)
E           
E           stdout mismatch:
E             expected: 'y=\ny='
E             actual:   'y=0\ny='
E           
E           Expected stdout: 'y=\ny='
E           Actual stdout:   'y=0\ny=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             readonly y
E             local x=1 y=$(( x ))
E             echo y=$y
E           }
E           f
E           echo y=$y
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[builtin-vars.test.sh::unset a[-1] (bf.bash regression)[L654]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109240ef0>
test_file = 'builtin-vars.test.sh'
test_case = TestCase(name='unset a[-1] (bf.bash regression)', script='case $SH in dash|zsh) exit ;; esac\n\na=(1 2 3)\nunset a[-1]...nt='BUG'), Assertion(type='stdout-json', value='', shells=['dash', 'zsh'], variant='N-I')], line_number=654, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unset a[-1] (bf.bash regression) (line 654)
E           
E           stdout mismatch:
E             expected: 'len=2\nlast=2\nlast=2\n1 42'
E             actual:   'len=3\nlast=3\nlast=3\n42 1 2 3'
E           
E           Expected stdout: 'len=2\nlast=2\nlast=2\n1 42'
E           Actual stdout:   'len=3\nlast=3\nlast=3\n42 1 2 3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh) exit ;; esac
E           
E           a=(1 2 3)
E           unset a[-1]
E           echo len=${#a[@]}
E           
E           echo last=${a[-1]}
E           (( last = a[-1] ))
E           echo last=$last
E           
E           (( a[-1] = 42 ))
E           echo "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[case_.test.sh::Quoted literal in glob pattern[L98]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092414f0>
test_file = 'case_.test.sh'
test_case = TestCase(name='Quoted literal in glob pattern', script='x=\'[ab].py\'\npat=\'[ab].py\'\ncase "$x" in\n  "$pat") echo m... ;;\nesac', assertions=[Assertion(type='stdout', value='match', shells=None, variant=None)], line_number=98, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quoted literal in glob pattern (line 98)
E           
E           stdout mismatch:
E             expected: 'match'
E             actual:   ''
E           
E           Expected stdout: 'match'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='[ab].py'
E           pat='[ab].py'
E           case "$x" in
E             "$pat") echo match ;;
E           esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[case_.test.sh::\\(\\) in pattern (regression)[L211]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092418b0>
test_file = 'case_.test.sh'
test_case = TestCase(name='\\(\\) in pattern (regression)', script="s='foo()'\n\ncase $s in\n  *\\(\\)) echo 'match'\nesac\n\ncase...e, variant=None), Assertion(type='stdout', value='match', shells=['dash'], variant='N-I')], line_number=211, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \(\) in pattern (regression) (line 211)
E           
E           stdout mismatch:
E             expected: 'match\nextglob'
E             actual:   'extglob'
E           
E           Expected stdout: 'match\nextglob'
E           Actual stdout:   'extglob\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s='foo()'
E           
E           case $s in
E             *\(\)) echo 'match'
E           esac
E           
E           case $SH in dash) exit ;; esac  # not implemented
E           
E           shopt -s extglob
E           
E           case $s in
E             *(foo|bar)'()') echo 'extglob'
E           esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[case_.test.sh::case \\n bug regression[L234]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109241970>
test_file = 'case_.test.sh'
test_case = TestCase(name='case \\n bug regression', script='case\nin esac', assertions=[Assertion(type='stdout', value='', shells...'mksh'], variant='OK'), Assertion(type='status', value=127, shells=['zsh'], variant='OK')], line_number=234, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: case \n bug regression (line 234)
E           
E           Execution error: Expected word after 'case' at line 2, column 1
E           
E           
E           Script:
E           ---
E           case
E           in esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-parsing.test.sh::Redirect on control flow (ignored in OSH)[L41]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109241c70>
test_file = 'command-parsing.test.sh'
test_case = TestCase(name='Redirect on control flow (ignored in OSH)', script='rm -f _tmp/r.txt\nfor x in a b c; do\n  break > _tm...ls=None, variant=None), Assertion(type='stdout', value='NO', shells=['osh'], variant='OK')], line_number=41, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect on control flow (ignored in OSH) (line 41)
E           
E           stdout mismatch:
E             expected: 'REDIRECTED'
E             actual:   'NO'
E           
E           Expected stdout: 'REDIRECTED'
E           Actual stdout:   'NO\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f _tmp/r.txt
E           for x in a b c; do
E             break > _tmp/r.txt
E           done
E           if test -f _tmp/r.txt; then
E             echo REDIRECTED
E           else
E             echo NO
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-parsing.test.sh::Redirect on control flow with ysh:all (no_parse_ignored)[L55]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109241d30>
test_file = 'command-parsing.test.sh'
test_case = TestCase(name='Redirect on control flow with ysh:all (no_parse_ignored)', script='shopt -s ysh:all\nrm -f _tmp/r.txt\n...sh'], variant='OK'), Assertion(type='stdout-json', value='', shells=['osh'], variant='OK')], line_number=55, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect on control flow with ysh:all (no_parse_ignored) (line 55)
E           
E           stdout mismatch:
E             expected: 'REDIRECTED'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'REDIRECTED'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: shopt: ysh:all: invalid shell option name\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           shopt -s ysh:all
E           rm -f _tmp/r.txt
E           for x in a b c; do
E             break > _tmp/r.txt
E           done
E           test -f _tmp/r.txt && echo REDIRECTED
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub-ksh.test.sh::${ echo hi;}[L6]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109241df0>
test_file = 'command-sub-ksh.test.sh'
test_case = TestCase(name='${ echo hi;}', script='x=${ echo hi;}\necho "[$x]"\necho\n\n# trailing space allowed\nx=${ echo one; ec...ion(type='stdout', value='[hi]\n\n[one\ntwo]\n\n[ 3 \n 4 5 ]\n', shells=None, variant=None)], line_number=6, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${ echo hi;} (line 6)
E           
E           stdout mismatch:
E             expected: '[hi]\n\n[one\ntwo]\n\n[ 3\n 4 5 ]'
E             actual:   '[]\n\n[]\n\n[]\n\n[]'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '[hi]\n\n[one\ntwo]\n\n[ 3 \n 4 5 ]\n'
E           Actual stdout:   '[]\n\n[]\n\n[]\n\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           x=${ echo hi;}
E           echo "[$x]"
E           echo
E           
E           # trailing space allowed
E           x=${ echo one; echo two; }
E           echo "[$x]"
E           echo
E           
E           myfunc() {
E             echo ' 3 '
E             echo ' 4 5 '
E           }
E           
E           x=${ myfunc;}
E           echo "[$x]"
E           echo
E           
E           # SYNTAX ERROR
E           x=${myfunc;}
E           echo "[$x]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub-ksh.test.sh::${ echo hi }  without semi-colon[L43]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109241eb0>
test_file = 'command-sub-ksh.test.sh'
test_case = TestCase(name='${ echo hi }  without semi-colon', script='x=${ echo no-semi }\necho "[$x]"\n\nx=${ echo no-space}\nech...=None), Assertion(type='stdout', value='[no-semi]\n[no-space]', shells=None, variant=None)], line_number=43, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${ echo hi }  without semi-colon (line 43)
E           
E           stdout mismatch:
E             expected: '[no-semi]\n[no-space]'
E             actual:   '[]\n[]'
E           status mismatch: expected 127, got 0
E           
E           Expected stdout: '[no-semi]\n[no-space]'
E           Actual stdout:   '[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 127
E           Actual status:   0
E           
E           Script:
E           ---
E           x=${ echo no-semi }
E           echo "[$x]"
E           
E           x=${ echo no-space}
E           echo "[$x]"
E           
E           # damn I wanted to take this over!  mksh executes it!
E           x=${ ~/ysh-tilde-sub }
E           
E           # echo ${ ~/ysh-tilde-sub }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub-ksh.test.sh::${|REPLY=hi}[L63]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109241f70>
test_file = 'command-sub-ksh.test.sh'
test_case = TestCase(name='${|REPLY=hi}', script='x=${|y=" reply var "; REPLY=$y}\necho "[$x]"\necho\n\necho \'  from file  \' > t...ssertion(type='stdout', value='[ reply var ]\n\n[from file]\n', shells=None, variant=None)], line_number=63, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${|REPLY=hi} (line 63)
E           
E           stdout mismatch:
E             expected: '[ reply var ]\n\n[from file]'
E             actual:   '[]\n\n[]\n\n[]'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '[ reply var ]\n\n[from file]\n'
E           Actual stdout:   '[]\n\n[]\n\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           x=${|y=" reply var "; REPLY=$y}
E           echo "[$x]"
E           echo
E           
E           echo '  from file  ' > tmp.txt
E           
E           x=${|read -r < tmp.txt}
E           echo "[$x]"
E           echo
E           
E           # SYNTAX ERROR
E           x=${ |REPLY=zz}
E           echo "[$x]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub-ksh.test.sh::for loop / case[L89]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109242030>
test_file = 'command-sub-ksh.test.sh'
test_case = TestCase(name='for loop / case', script='x=${ for i in a b; do echo -$i-; done; }\necho "$x"\n\ny=${|for i in a b; do ...on(type='stdout', value='-a-\n-b-\n-a--b-\n\nsh-case\nsh-case', shells=None, variant=None)], line_number=89, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: for loop / case (line 89)
E           
E           stdout mismatch:
E             expected: '-a-\n-b-\n-a--b-\n\nsh-case\nsh-case'
E             actual:   ''
E           
E           Expected stdout: '-a-\n-b-\n-a--b-\n\nsh-case\nsh-case'
E           Actual stdout:   '\n\n\n\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=${ for i in a b; do echo -$i-; done; }
E           echo "$x"
E           
E           y=${|for i in a b; do REPLY+="-$i-"; done; }
E           echo "$y"
E           
E           echo
E           
E           x2=${ case foo in foo) echo sh-case ;; esac; }
E           echo "$x2"
E           
E           y2=${|case foo in foo) REPLY=sh-case ;; esac; }
E           echo "$y2"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::case in subshell[L7]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092421b0>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='case in subshell', script='# Hm this subhell has to know about the closing ) and stuff like that.\n# ca... ;; esac)', assertions=[Assertion(type='stdout', value='letter', shells=None, variant=None)], line_number=7, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: case in subshell (line 7)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 5, column 46
E           
E           
E           Script:
E           ---
E           # Hm this subhell has to know about the closing ) and stuff like that.
E           # case_clause is a compound_command, which is a command.  And a subshell
E           # takes a compound_list, which is a list of terms, which has and_ors in them
E           # ... which eventually boils down to a command.
E           echo $(foo=a; case $foo in [0-9]) echo number;; [a-z]) echo letter ;; esac)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Nested backticks[L28]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092424b0>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Nested backticks', script='# Inner `` are escaped!  Not sure how to do triple..  Seems like an unlikely...t`', assertions=[Assertion(type='stdout', value='000000-first', shells=None, variant=None)], line_number=28, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Nested backticks (line 28)
E           
E           stdout mismatch:
E             expected: '000000-first'
E             actual:   '000000-first _keep'
E           
E           Expected stdout: '000000-first'
E           Actual stdout:   '000000-first _keep\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Inner `` are escaped!  Not sure how to do triple..  Seems like an unlikely
E           # use case.  Not sure if I even want to support this!
E           echo X > $TMP/000000-first
E           echo `\`echo -n l; echo -n s\` $TMP | grep 000000-first`
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Making keyword out of command sub should NOT work[L40]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109242630>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Making keyword out of command sub should NOT work', script='$(echo f)$(echo or) i in a b c; do echo $i;...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=40, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Making keyword out of command sub should NOT work (line 40)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 33
E           
E           
E           Script:
E           ---
E           $(echo f)$(echo or) i in a b c; do echo $i; done
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Command Sub trailing newline removed[L66]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109242930>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Command Sub trailing newline removed', script='s=$(python2 -c \'print("ab\\ncd\\n")\')\nargv.py "$s"', assertions=[Assertion(type='stdout', value="['ab\\ncd']", shells=None, variant=None)], line_number=66, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Command Sub trailing newline removed (line 66)
E           
E           stdout mismatch:
E             expected: "['ab\\ncd']"
E             actual:   "['']"
E           
E           Expected stdout: "['ab\\ncd']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s=$(python2 -c 'print("ab\ncd\n")')
E           argv.py "$s"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Command Sub trailing whitespace not removed[L72]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092429f0>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Command Sub trailing whitespace not removed', script='s=$(python2 -c \'print("ab\\ncd\\n ")\')\nargv.py..., assertions=[Assertion(type='stdout', value="['ab\\ncd\\n ']", shells=None, variant=None)], line_number=72, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Command Sub trailing whitespace not removed (line 72)
E           
E           stdout mismatch:
E             expected: "['ab\\ncd\\n ']"
E             actual:   "['']"
E           
E           Expected stdout: "['ab\\ncd\\n ']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s=$(python2 -c 'print("ab\ncd\n ")')
E           argv.py "$s"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Command Sub and exit code[L78]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109242ab0>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Command Sub and exit code', script="# A command resets the exit code, but an assignment doesn't.\necho ...cho $?", assertions=[Assertion(type='stdout', value='x\n0\n33', shells=None, variant=None)], line_number=78, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Command Sub and exit code (line 78)
E           
E           stdout mismatch:
E             expected: 'x\n0\n33'
E             actual:   'x\n0\n0'
E           
E           Expected stdout: 'x\n0\n33'
E           Actual stdout:   'x\n0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # A command resets the exit code, but an assignment doesn't.
E           echo $(echo x; exit 33)
E           echo $?
E           x=$(echo x; exit 33)
E           echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Double Quotes in Command Sub in Double Quotes[L105]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109242c30>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Double Quotes in Command Sub in Double Quotes', script='# virtualenv\'s bin/activate uses this.\n# This...n(type='stdout', value='x hi\nx hi\nx "hi"\nx hi\nx hi\nx hi', shells=None, variant=None)], line_number=105, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Double Quotes in Command Sub in Double Quotes (line 105)
E           
E           stdout mismatch:
E             expected: 'x hi\nx hi\nx "hi"\nx hi\nx hi\nx hi'
E             actual:   'x hi\nx hi\nx "hi"\nx hi\nx hi\nx "hi"'
E           
E           Expected stdout: 'x hi\nx hi\nx "hi"\nx hi\nx hi\nx hi'
E           Actual stdout:   'x hi\nx hi\nx "hi"\nx hi\nx hi\nx "hi"\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # virtualenv's bin/activate uses this.
E           # This is weird!  Double quotes within `` is different than double quotes
E           # within $()!  All shells agree.
E           # I think this is related to the nested backticks case!
E           echo "x $(echo hi)"
E           echo "x $(echo "hi")"
E           echo "x $(echo \"hi\")"
E           echo "x `echo hi`"
E           echo "x `echo "hi"`"
E           echo "x `echo \"hi\"`"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Quoting $ within ``[L145]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109242e70>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Quoting $ within ``', script='echo 1 `echo $`\necho 2 `echo \\$`\necho 3 `echo \\\\$`\necho 4 `echo \\\...s=[Assertion(type='stdout', value='1 $\n2 $\n3 $\n4 $\n5 \\$', shells=None, variant=None)], line_number=145, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quoting $ within `` (line 145)
E           
E           stdout mismatch:
E             expected: '1 $\n2 $\n3 $\n4 $\n5 \\$'
E             actual:   '1 $\n2 $\n3 $\n4 $\n5 $'
E           
E           Expected stdout: '1 $\n2 $\n3 $\n4 $\n5 \\$'
E           Actual stdout:   '1 $\n2 $\n3 $\n4 $\n5 $\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo 1 `echo $`
E           echo 2 `echo \$`
E           echo 3 `echo \\$`
E           echo 4 `echo \\\$`
E           echo 5 `echo \\\\$`
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Quoting $ within `` within double quotes[L159]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109242f30>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Quoting $ within `` within double quotes', script='echo "1 `echo $`"\necho "2 `echo \\$`"\necho "3 `ech...s=[Assertion(type='stdout', value='1 $\n2 $\n3 $\n4 $\n5 \\$', shells=None, variant=None)], line_number=159, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quoting $ within `` within double quotes (line 159)
E           
E           stdout mismatch:
E             expected: '1 $\n2 $\n3 $\n4 $\n5 \\$'
E             actual:   '1 $\n2 $\n3 $\n4 $\n5 $'
E           
E           Expected stdout: '1 $\n2 $\n3 $\n4 $\n5 \\$'
E           Actual stdout:   '1 $\n2 $\n3 $\n4 $\n5 $\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "1 `echo $`"
E           echo "2 `echo \$`"
E           echo "3 `echo \\$`"
E           echo "4 `echo \\\$`"
E           echo "5 `echo \\\\$`"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Quoting non-special characters within ``[L214]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092432f0>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Quoting non-special characters within ``', script='echo [1 `echo \\z]`\necho [2 `echo \\\\z]`\necho [3 ...sertion(type='stdout', value='[1 z]\n[2 z]\n[3 \\z]\n[4 \\z]', shells=None, variant=None)], line_number=214, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quoting non-special characters within `` (line 214)
E           
E           stdout mismatch:
E             expected: '[1 z]\n[2 z]\n[3 \\z]\n[4 \\z]'
E             actual:   '[1 z]\n[2 z]\n[3 z]\n[4 z]'
E           
E           Expected stdout: '[1 z]\n[2 z]\n[3 \\z]\n[4 \\z]'
E           Actual stdout:   '[1 z]\n[2 z]\n[3 z]\n[4 z]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo [1 `echo \z]`
E           echo [2 `echo \\z]`
E           echo [3 `echo \\\z]`
E           echo [4 `echo \\\\z]`
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Quoting non-special characters within `` within double quotes[L226]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092433b0>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Quoting non-special characters within `` within double quotes', script='echo "[1 `echo \\z`]"\necho "[2...sertion(type='stdout', value='[1 z]\n[2 z]\n[3 \\z]\n[4 \\z]', shells=None, variant=None)], line_number=226, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quoting non-special characters within `` within double quotes (line 226)
E           
E           stdout mismatch:
E             expected: '[1 z]\n[2 z]\n[3 \\z]\n[4 \\z]'
E             actual:   '[1 z]\n[2 z]\n[3 z]\n[4 z]'
E           
E           Expected stdout: '[1 z]\n[2 z]\n[3 \\z]\n[4 \\z]'
E           Actual stdout:   '[1 z]\n[2 z]\n[3 z]\n[4 z]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "[1 `echo \z`]"
E           echo "[2 `echo \\z`]"
E           echo "[3 `echo \\\z`]"
E           echo "[4 `echo \\\\z`]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::More levels of double quotes in backticks[L248]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243530>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='More levels of double quotes in backticks', script='# Shells don\'t agree here, some of them give you f...', value='\x0coo\\ -\n\x0coo\\ -\n\\"foo\\" -\n', shells=['dash', 'mksh'], variant='BUG')], line_number=248, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: More levels of double quotes in backticks (line 248)
E           
E           stdout mismatch:
E             expected: '\\foo\\ -\n\\foo\\ -\n\\"foo\\" -'
E             actual:   '"foo" -\n"foo" -\n\\foo\\\\ -'
E           
E           Expected stdout: '\\foo\\ -\n\\foo\\ -\n\\"foo\\" -'
E           Actual stdout:   '"foo" -\n"foo" -\n\\foo\\\\ -\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Shells don't agree here, some of them give you form feeds!
E           # There are two levels of processing I don't understand.
E           
E           #echo BUG
E           #exit
E           
E           echo `echo \\\"foo\\\"` -
E           echo `echo \\\\"foo\\\\"` -
E           echo `echo \\\\\"foo\\\\\"` -
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command-sub.test.sh::Syntax errors with double quotes within backticks[L268]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092435f0>
test_file = 'command-sub.test.sh'
test_case = TestCase(name='Syntax errors with double quotes within backticks', script='# bash does print syntax errors but somehow..., Assertion(type='stdout', value='\nstatus=0\n\nstatus=0', shells=['bash'], variant='OK')], line_number=268, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Syntax errors with double quotes within backticks (line 268)
E           
E           stdout mismatch:
E             expected: '\nstatus=0\n\nstatus=0'
E             actual:   '\nstatus=0\n"\nstatus=0'
E           
E           Expected stdout: '\nstatus=0\n\nstatus=0'
E           Actual stdout:   '\nstatus=0\n"\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash does print syntax errors but somehow it exits 0
E           
E           $SH -c 'echo `echo "`'
E           echo status=$?
E           $SH -c 'echo `echo \\\\"`'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[command_.test.sh::Command block[L5]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243770>
test_file = 'command_.test.sh'
test_case = TestCase(name='Command block', script='PATH=/bin\n\n{ which ls; }', assertions=[Assertion(type='stdout', value='/bin/ls', shells=None, variant=None)], line_number=5, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Command block (line 5)
E           
E           stdout mismatch:
E             expected: '/bin/ls'
E             actual:   '/usr/bin/ls'
E           
E           Expected stdout: '/bin/ls'
E           Actual stdout:   '/usr/bin/ls\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           PATH=/bin
E           
E           { which ls; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[command_.test.sh::Permission denied[L11]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243830>
test_file = 'command_.test.sh'
test_case = TestCase(name='Permission denied', script='touch $TMP/text-file\n$TMP/text-file', assertions=[Assertion(type='status', value=126, shells=None, variant=None)], line_number=11, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Permission denied (line 11)
E           
E           status mismatch: expected 126, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: /tmp/text-file: command not found\n'
E           Expected status: 126
E           Actual status:   1
E           
E           Script:
E           ---
E           touch $TMP/text-file
E           $TMP/text-file
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[command_.test.sh::Not a dir[L16]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092438f0>
test_file = 'command_.test.sh'
test_case = TestCase(name='Not a dir', script='$TMP/not-a-dir/text-file', assertions=[Assertion(type='status', value=127, shells=None, variant=None)], line_number=16, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Not a dir (line 16)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: /tmp/not-a-dir/text-file: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           $TMP/not-a-dir/text-file
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[command_.test.sh::Name too long[L20]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092439b0>
test_file = 'command_.test.sh'
test_case = TestCase(name='Name too long', script='./01234567890123456789012345678901234567890123456789012345678901234567890123456...'dash'], variant='OK'), Assertion(type='status', value=126, shells=['bash'], variant='OK')], line_number=20, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Name too long (line 20)
E           
E           status mismatch: expected one of [127, 126], got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: ./0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           ./0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::File with no shebang is executed[L32]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243b30>
test_file = 'command_.test.sh'
test_case = TestCase(name='File with no shebang is executed', script="# most shells execute /bin/sh; bash may execute itself\necho..., shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=32, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File with no shebang is executed (line 32)
E           
E           stdout mismatch:
E             expected: 'hi'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'hi'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: /tmp/no-shebang: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           # most shells execute /bin/sh; bash may execute itself
E           echo 'echo hi' > $TMP/no-shebang
E           chmod +x $TMP/no-shebang
E           $SH -c '$TMP/no-shebang'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::File with relative path and no shebang is executed[L40]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243bf0>
test_file = 'command_.test.sh'
test_case = TestCase(name='File with relative path and no shebang is executed', script='cd $TMP\necho \'echo hi\' > no-shebang\nch..., shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=40, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File with relative path and no shebang is executed (line 40)
E           
E           stdout mismatch:
E             expected: 'hi'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'hi'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: ./no-shebang: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           cd $TMP
E           echo 'echo hi' > no-shebang
E           chmod +x no-shebang
E           "$SH" -c './no-shebang'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::File in relative subdirectory and no shebang is executed[L48]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243cb0>
test_file = 'command_.test.sh'
test_case = TestCase(name='File in relative subdirectory and no shebang is executed', script='cd $TMP\nmkdir -p test-no-shebang\ne..., shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=48, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File in relative subdirectory and no shebang is executed (line 48)
E           
E           stdout mismatch:
E             expected: 'hi'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'hi'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: test-no-shebang/script: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           cd $TMP
E           mkdir -p test-no-shebang
E           echo 'echo hi' > test-no-shebang/script
E           chmod +x test-no-shebang/script
E           "$SH" -c 'test-no-shebang/script'
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[command_.test.sh::$PATH lookup[L57]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243d70>
test_file = 'command_.test.sh'
test_case = TestCase(name='$PATH lookup', script="cd $TMP\nmkdir -p one two\necho 'echo one' > one/mycmd\necho 'echo two' > two/my...two'\nmycmd", assertions=[Assertion(type='stdout', value='one', shells=None, variant=None)], line_number=57, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $PATH lookup (line 57)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: mycmd: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           cd $TMP
E           mkdir -p one two
E           echo 'echo one' > one/mycmd
E           echo 'echo two' > two/mycmd
E           chmod +x one/mycmd two/mycmd
E           
E           PATH='one:two'
E           mycmd
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::filling $PATH cache, then insert the same command earlier in cache[L70]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243e30>
test_file = 'command_.test.sh'
test_case = TestCase(name='filling $PATH cache, then insert the same command earlier in cache', script='cd $TMP\nPATH="one:two:$PA...riant=None), Assertion(type='stdout', value='two\none\none', shells=['zsh'], variant='OK')], line_number=70, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: filling $PATH cache, then insert the same command earlier in cache (line 70)
E           
E           stdout mismatch:
E             expected: 'two\ntwo\none'
E             actual:   ''
E           
E           Expected stdout: 'two\ntwo\none'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: mycmd: command not found\nbash: mycmd: command not found\nbash: hash: command not found\nbash: mycmd: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           cd $TMP
E           PATH="one:two:$PATH"
E           mkdir -p one two
E           rm -f one/* two/*
E           echo 'echo two' > two/mycmd
E           chmod +x two/mycmd
E           mycmd
E           
E           # Insert earlier in the path
E           echo 'echo one' > one/mycmd
E           chmod +x one/mycmd
E           mycmd  # still runs the cached 'two'
E           
E           # clear the cache
E           hash -r
E           mycmd  # now it runs the new 'one'
E           
E           
E           # zsh doesn't do caching!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::filling $PATH cache, then deleting command[L101]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243ef0>
test_file = 'command_.test.sh'
test_case = TestCase(name='filling $PATH cache, then deleting command', script='cd $TMP\nPATH="one:two:$PATH"\nmkdir -p one two\nr...ype='stdout', value='two\nstatus=0\none\nstatus=0', shells=['zsh', 'mksh'], variant='OK')], line_number=101, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: filling $PATH cache, then deleting command (line 101)
E           
E           stdout mismatch:
E             expected: 'two\nstatus=0\nstatus=127'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'two\nstatus=0\nstatus=127'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: mycmd: command not found\nbash: mycmd: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd $TMP
E           PATH="one:two:$PATH"
E           mkdir -p one two
E           rm -f one/mycmd two/mycmd
E           
E           echo 'echo two' > two/mycmd
E           chmod +x two/mycmd
E           mycmd
E           echo status=$?
E           
E           # Insert earlier in the path
E           echo 'echo one' > one/mycmd
E           chmod +x one/mycmd
E           rm two/mycmd
E           mycmd  # still runs the cached 'two'
E           echo status=$?
E           
E           
E           # mksh and zsh correctly searches for the executable again!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::Non-executable on $PATH[L134]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109243fb0>
test_file = 'command_.test.sh'
test_case = TestCase(name='Non-executable on $PATH', script='# shells differ in whether they actually execve(\'one/cmd\') and get ...', assertions=[Assertion(type='stdout', value='two\nstatus=0', shells=None, variant=None)], line_number=134, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Non-executable on $PATH (line 134)
E           
E           stdout mismatch:
E             expected: 'two\nstatus=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'two\nstatus=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: mycmd: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # shells differ in whether they actually execve('one/cmd') and get EPERM
E           
E           mkdir -p one two
E           PATH="one:two:$PATH"
E           
E           rm -f one/mycmd two/mycmd
E           echo 'echo one' > one/mycmd
E           echo 'echo two' > two/mycmd
E           
E           # only make the second one executable
E           chmod +x two/mycmd
E           mycmd
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::hash without args prints the cache[L154]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092640b0>
test_file = 'command_.test.sh'
test_case = TestCase(name='hash without args prints the cache', script='whoami >/dev/null\nhash\necho status=$?\n\n# bash uses a w...'stdout', value='whoami=/usr/bin/whoami\nstatus=0', shells=['mksh', 'zsh'], variant='OK')], line_number=154, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: hash without args prints the cache (line 154)
E           
E           stdout mismatch:
E             expected: 'hits\tcommand\n   1\t/usr/bin/whoami\nstatus=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'hits\tcommand\n   1\t/usr/bin/whoami\nstatus=0\n'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: hash: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           whoami >/dev/null
E           hash
E           echo status=$?
E           
E           # bash uses a weird table.  Although we could use TSV2.
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[command_.test.sh::hash with args[L172]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109264170>
test_file = 'command_.test.sh'
test_case = TestCase(name='hash with args', script="hash whoami\necho status=$?\nhash | grep -o /whoami  # prints it twice\nhash _...rtion(type='stdout', value='status=0\n/whoami\nstatus=0', shells=['mksh'], variant='BUG')], line_number=172, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: hash with args (line 172)
E           
E           stdout mismatch:
E             expected: 'status=0\n/whoami\nstatus=1'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=0\n/whoami\nstatus=1'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: hash: command not found\nbash: hash: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           hash whoami
E           echo status=$?
E           hash | grep -o /whoami  # prints it twice
E           hash _nonexistent_
E           echo status=$?
E           
E           # mksh doesn't fail
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::hash -r doesn't allow additional args[L191]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109264230>
test_file = 'command_.test.sh'
test_case = TestCase(name="hash -r doesn't allow additional args", script='hash -r whoami >/dev/null  # avoid weird output with mk...'OK'), Assertion(type='stdout', value='status=0', shells=['dash', 'bash'], variant='BUG')], line_number=191, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: hash -r doesn't allow additional args (line 191)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: hash: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           hash -r whoami >/dev/null  # avoid weird output with mksh
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[command_.test.sh::PATH resolution skips directories and non-executables[L198]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092642f0>
test_file = 'command_.test.sh'
test_case = TestCase(name='PATH resolution skips directories and non-executables', script='# Make the following directory structur...pe='stdout', value='hi\nstatus=0\nhi\nstatus=0\nhi\nstatus=0', shells=None, variant=None)], line_number=198, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: PATH resolution skips directories and non-executables (line 198)
E           
E           stdout mismatch:
E             expected: 'hi\nstatus=0\nhi\nstatus=0\nhi\nstatus=0'
E             actual:   'status=1\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'hi\nstatus=0\nhi\nstatus=0\nhi\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: hello: command not found\nbash: hash: command not found\nbash: hello: command not found\nbash: hash: command not found\nbash: hello: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Make the following directory structure. File type and permission bits are
E           # given on the left.
E           # [drwxr-xr-x]  _tmp
E           # +-- [drwxr-xr-x]  bin
E           # |   \-- [-rwxr-xr-x]  hello
E           # +-- [drwxr-xr-x]  notbin
E           # |   \-- [-rw-r--r--]  hello
E           # \-- [drwxr-xr-x]  dir
E           #     \-- [drwxr-xr-x]  hello
E           mkdir -p _tmp/bin
E           mkdir -p _tmp/bin2
E           mkdir -p _tmp/notbin
E           mkdir -p _tmp/dir/hello
E           printf '#!/usr/bin/env sh\necho hi\n' >_tmp/notbin/hello
E           printf '#!/usr/bin/env sh\necho hi\n' >_tmp/bin/hello
E           chmod +x _tmp/bin/hello
E           
E           DIR=$PWD/_tmp/dir
E           BIN=$PWD/_tmp/bin
E           NOTBIN=$PWD/_tmp/notbin
E           
E           # The command resolution will search the path for matching *files* (not
E           # directories) WITH the execute bit set.
E           
E           # Should find executable hello right away and run it
E           PATH="$BIN:$PATH" hello
E           echo status=$?
E           
E           hash -r  # Needed to clear the PATH cache
E           
E           # Will see hello dir, skip it and then find&run the hello exe
E           PATH="$DIR:$BIN:$PATH" hello
E           echo status=$?
E           
E           hash -r  # Needed to clear the PATH cache
E           
E           # Will see hello (non-executable) file, skip it and then find&run the hello exe
E           PATH="$NOTBIN:$BIN:$PATH" hello
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ compare with literal -f (compare with test-builtin.test.sh)[L166]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109265370>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='[[ compare with literal -f (compare with test-builtin.test.sh)', script="var=-f\n[[ $var == -f ]] && ec...rue", assertions=[Assertion(type='stdout', value='true\ntrue', shells=None, variant=None)], line_number=166, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ compare with literal -f (compare with test-builtin.test.sh) (line 166)
E           
E           Execution error: Expected ']]' to close conditional at line 3, column 12
E           
E           
E           Script:
E           ---
E           var=-f
E           [[ $var == -f ]] && echo true
E           [[ '-f' == $var ]] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ with op variable (compare with test-builtin.test.sh)[L175]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109265430>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='[[ with op variable (compare with test-builtin.test.sh)', script="# Parse error -- parsed BEFORE evalua...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=175, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ with op variable (compare with test-builtin.test.sh) (line 175)
E           
E           Execution error: Expected ']]' to close conditional at line 3, column 6
E           
E           
E           Script:
E           ---
E           # Parse error -- parsed BEFORE evaluation of vars
E           op='=='
E           [[ a $op a ]] && echo true
E           [[ a $op b ]] || echo false
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ at runtime doesn't work[L188]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092655b0>
test_file = 'dbracket.test.sh'
test_case = TestCase(name="[[ at runtime doesn't work", script='dbracket=[[\n$dbracket foo == foo ]]', assertions=[Assertion(type='status', value=127, shells=None, variant=None)], line_number=188, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ at runtime doesn't work (line 188)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 2, column 22
E           
E           
E           Script:
E           ---
E           dbracket=[[
E           $dbracket foo == foo ]]
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ with env prefix doesn't work[L193]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109265670>
test_file = 'dbracket.test.sh'
test_case = TestCase(name="[[ with env prefix doesn't work", script='FOO=bar [[ foo == foo ]]', assertions=[Assertion(type='status', value=127, shells=None, variant=None)], line_number=193, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ with env prefix doesn't work (line 193)
E           
E           status mismatch: expected 127, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 127
E           Actual status:   0
E           
E           Script:
E           ---
E           FOO=bar [[ foo == foo ]]
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::Argument that looks like a real operator[L207]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092657f0>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='Argument that looks like a real operator', script="[[ -f < ]] && echo 'should be parse error'", asserti...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=207, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Argument that looks like a real operator (line 207)
E           
E           status mismatch: expected 2, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   1
E           
E           Script:
E           ---
E           [[ -f < ]] && echo 'should be parse error'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::-eq does dynamic arithmetic parsing (not supported in OSH)[L267]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109265bb0>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='-eq does dynamic arithmetic parsing (not supported in OSH)', script="[[ 1+2 -eq 3 ]] && echo true\nexpr...sed", assertions=[Assertion(type='stdout', value='true\ntrue', shells=None, variant=None)], line_number=267, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -eq does dynamic arithmetic parsing (not supported in OSH) (line 267)
E           
E           stdout mismatch:
E             expected: 'true\ntrue'
E             actual:   ''
E           
E           Expected stdout: 'true\ntrue'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           [[ 1+2 -eq 3 ]] && echo true
E           expr='1+2'
E           [[ $expr -eq 3 ]] && echo true  # must be dynamically parsed
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ '(' foo ]] is syntax error[L286]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109265df0>
test_file = 'dbracket.test.sh'
test_case = TestCase(name="[[ '(' foo ]] is syntax error", script="[[ '(' foo ]]\necho status=$?", assertions=[Assertion(type='sta...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=286, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ '(' foo ]] is syntax error (line 286)
E           
E           Execution error: Expected ']]' to close conditional at line 1, column 8
E           
E           
E           Script:
E           ---
E           [[ '(' foo ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ -z ]] is syntax error[L297]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109265f70>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='[[ -z ]] is syntax error', script='[[ -z ]]\necho status=$?', assertions=[Assertion(type='status', valu...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=297, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ -z ]] is syntax error (line 297)
E           
E           Execution error: Expected operand after -z at line 1, column 7
E           
E           
E           Script:
E           ---
E           [[ -z ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ -z '>' a ]] is syntax error[L307]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092660f0>
test_file = 'dbracket.test.sh'
test_case = TestCase(name="[[ -z '>' a ]] is syntax error", script="[[ -z '>' -- ]]\necho status=$?", assertions=[Assertion(type='...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=307, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ -z '>' a ]] is syntax error (line 307)
E           
E           Execution error: Expected ']]' to close conditional at line 1, column 11
E           
E           
E           Script:
E           ---
E           [[ -z '>' -- ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ ]] is syntax error[L318]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109266270>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='[[ ]] is syntax error', script='[[ ]]\necho status=$?', assertions=[Assertion(type='stdout-json', value...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=318, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ ]] is syntax error (line 318)
E           
E           Execution error: Expected conditional expression at line 1, column 4
E           
E           
E           Script:
E           ---
E           [[ ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ && ]] is syntax error[L325]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109266330>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='[[ && ]] is syntax error', script='[[ && ]]\necho status=$?', assertions=[Assertion(type='stdout-json',...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=325, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ && ]] is syntax error (line 325)
E           
E           Execution error: Expected conditional expression at line 1, column 4
E           
E           
E           Script:
E           ---
E           [[ && ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ a 3< b ]] doesn't work (bug regression)[L332]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092663f0>
test_file = 'dbracket.test.sh'
test_case = TestCase(name="[[ a 3< b ]] doesn't work (bug regression)", script='[[ a 3< b ]]\necho status=$?\n[[ a 3> b ]]\necho s...sertion(type='stdout', value='status=0\nstatus=1', shells=['mksh', 'zsh'], variant='BUG')], line_number=332, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ a 3< b ]] doesn't work (bug regression) (line 332)
E           
E           Execution error: Expected ']]' to close conditional at line 1, column 6
E           
E           
E           Script:
E           ---
E           [[ a 3< b ]]
E           echo status=$?
E           [[ a 3> b ]]
E           echo status=$?
E           
E           # Hm these shells use the same redirect trick that OSH used to!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::tilde expansion with =~ (confusing)[L390]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109266630>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='tilde expansion with =~ (confusing)', script="case $SH in mksh) exit ;; esac\n\nHOME=foo\n[[ ~ =~ $HOME...], variant='OK'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=390, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: tilde expansion with =~ (confusing) (line 390)
E           
E           stdout mismatch:
E             expected: 'regex=0\nregex=0\nregex=1\nregex=0'
E             actual:   'regex=0\nregex=0\nregex=1\nregex=1'
E           
E           Expected stdout: 'regex=0\nregex=0\nregex=1\nregex=0'
E           Actual stdout:   'regex=0\nregex=0\nregex=1\nregex=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           HOME=foo
E           [[ ~ =~ $HOME ]]
E           echo regex=$?
E           [[ $HOME =~ ~ ]]
E           echo regex=$?
E           
E           HOME='^a$'  # looks like regex
E           [[ ~ =~ $HOME ]]
E           echo regex=$?
E           [[ $HOME =~ ~ ]]
E           echo regex=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::[[ ]] with redirect[L419]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092666f0>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='[[ ]] with redirect', script='[[ $(stdout_stderr.py) == STDOUT ]] 2>$TMP/x.txt\necho $?\necho --\ncat $...', assertions=[Assertion(type='stdout', value='0\n--\nSTDERR', shells=None, variant=None)], line_number=419, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ ]] with redirect (line 419)
E           
E           stdout mismatch:
E             expected: '0\n--\nSTDERR'
E             actual:   '0\n--'
E           
E           Expected stdout: '0\n--\nSTDERR'
E           Actual stdout:   '0\n--\n'
E           Expected stderr: None
E           Actual stderr:   'cat: /tmp/x.txt: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           [[ $(stdout_stderr.py) == STDOUT ]] 2>$TMP/x.txt
E           echo $?
E           echo --
E           cat $TMP/x.txt
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dbracket.test.sh::\\(\\) in pattern (regression)[L441]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109266870>
test_file = 'dbracket.test.sh'
test_case = TestCase(name='\\(\\) in pattern (regression)', script="if [[ 'foo()' == *\\(\\) ]]; then echo match1; fi\nif [[ 'foo(...sertion(type='stdout', value='match1\nmatch2\nmatch1\nmatch2', shells=None, variant=None)], line_number=441, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \(\) in pattern (regression) (line 441)
E           
E           stdout mismatch:
E             expected: 'match1\nmatch2\nmatch1\nmatch2'
E             actual:   ''
E           
E           Expected stdout: 'match1\nmatch2\nmatch1\nmatch2'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           if [[ 'foo()' == *\(\) ]]; then echo match1; fi
E           if [[ 'foo()' == *'()' ]]; then echo match2; fi
E           if [[ 'foo()' == '*()' ]]; then echo match3; fi
E           
E           shopt -s extglob
E           
E           if [[ 'foo()' == *\(\) ]]; then echo match1; fi
E           if [[ 'foo()' == *'()' ]]; then echo match2; fi
E           if [[ 'foo()' == '*()' ]]; then echo match3; fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dparen.test.sh::bash: K in (( A[K] = V )) is a constant string[L68]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109266f30>
test_file = 'dparen.test.sh'
test_case = TestCase(name='bash: K in (( A[K] = V )) is a constant string', script='K=5\nV=42\ntypeset -A A\n(( A[K] = V ))\n\nech...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=68, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bash: K in (( A[K] = V )) is a constant string (line 68)
E           
E           stdout mismatch:
E             expected: 'A[5]=\nkeys = K\nvalues = 42'
E             actual:   'A[5]=\nkeys = 5\nvalues = 42'
E           
E           Expected stdout: 'A[5]=\nkeys = K\nvalues = 42'
E           Actual stdout:   'A[5]=\nkeys = 5\nvalues = 42\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           K=5
E           V=42
E           typeset -A A
E           (( A[K] = V ))
E           
E           echo A["5"]=${A["5"]}
E           echo keys = ${!A[@]}
E           echo values = ${A[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dparen.test.sh::bash: V in (( A["K"] = V )) gets coerced to integer[L108]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092670b0>
test_file = 'dparen.test.sh'
test_case = TestCase(name='bash: V in (( A["K"] = V )) gets coerced to integer', script='shopt -u strict_arith || true\nK=key\nV=v...mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=108, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bash: V in (( A["K"] = V )) gets coerced to integer (line 108)
E           
E           stdout mismatch:
E             expected: 'A["key"]=\nkeys = K\nvalues = 0'
E             actual:   'A["key"]=\nkeys = 0\nvalues = 0'
E           
E           Expected stdout: 'A["key"]=\nkeys = K\nvalues = 0'
E           Actual stdout:   'A["key"]=\nkeys = 0\nvalues = 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -u strict_arith || true
E           K=key
E           V=value
E           typeset -A A || exit 1
E           (( A["K"] = V ))
E           
E           # not there!
E           echo A[\"key\"]=${A[$K]}
E           
E           echo keys = ${!A[@]}
E           echo values = ${A[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dparen.test.sh::literal strings inside (( ))[L130]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109267170>
test_file = 'dparen.test.sh'
test_case = TestCase(name='literal strings inside (( ))', script="declare -A A\nA['x']=42\n(( x = A['x'] ))\n(( A['y'] = 'y' ))  #...sh'], variant='N-I'), Assertion(type='stdout', value='42', shells=['zsh'], variant='N-I')], line_number=130, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: literal strings inside (( )) (line 130)
E           
E           stdout mismatch:
E             expected: '42 0'
E             actual:   '42'
E           
E           Expected stdout: '42 0'
E           Actual stdout:   '42\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A
E           A['x']=42
E           (( x = A['x'] ))
E           (( A['y'] = 'y' ))  # y is a variable, gets coerced to 0
E           echo $x ${A['y']}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dparen.test.sh::(( )) with redirect[L148]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109267230>
test_file = 'dparen.test.sh'
test_case = TestCase(name='(( )) with redirect', script='(( a = $(stdout_stderr.py 42) + 10 )) 2>$TMP/x.txt\necho $a\necho --\ncat..., assertions=[Assertion(type='stdout', value='52\n--\nSTDERR', shells=None, variant=None)], line_number=148, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: (( )) with redirect (line 148)
E           
E           stdout mismatch:
E             expected: '52\n--\nSTDERR'
E             actual:   '10\n--'
E           
E           Expected stdout: '52\n--\nSTDERR'
E           Actual stdout:   '10\n--\n'
E           Expected stderr: None
E           Actual stderr:   'cat: /tmp/x.txt: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           (( a = $(stdout_stderr.py 42) + 10 )) 2>$TMP/x.txt
E           echo $a
E           echo --
E           cat $TMP/x.txt
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dparen.test.sh::Assigning whole raray (( b = a ))[L159]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092672f0>
test_file = 'dparen.test.sh'
test_case = TestCase(name='Assigning whole raray (( b = a ))', script='a=(4 5 6)\n(( b = a ))\n\necho "${a[@]}"\n\n# OSH doesn\'t ... variant='BUG'), Assertion(type='stdout', value='4 5 6\n', shells=['zsh'], variant='BUG')], line_number=159, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Assigning whole raray (( b = a )) (line 159)
E           
E           stdout mismatch:
E             expected: '4 5 6\n4'
E             actual:   '4 5 6'
E           
E           Expected stdout: '4 5 6\n4'
E           Actual stdout:   '4 5 6\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(4 5 6)
E           (( b = a ))
E           
E           echo "${a[@]}"
E           
E           # OSH doesn't like this
E           echo "${b[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[dparen.test.sh::Example of incrementing associative array entry with var key (ble.sh)[L192]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109267470>
test_file = 'dparen.test.sh'
test_case = TestCase(name='Example of incrementing associative array entry with var key (ble.sh)', script="declare -A A=(['foo']=4...nt='N-I'), Assertion(type='stdout-json', value='', shells=['mksh', 'zsh'], variant='N-I')], line_number=192, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Example of incrementing associative array entry with var key (ble.sh) (line 192)
E           
E           stdout mismatch:
E             expected: 'foo=44'
E             actual:   'foo=42'
E           
E           Expected stdout: 'foo=44'
E           Actual stdout:   'foo=42\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A=(['foo']=42)
E           key='foo'
E           
E           # note: in bash, (( A[\$key] += 1 )) works the same way.
E           
E           set -- 1 2
E           (( A[$key] += $2 ))
E           
E           echo foo=${A['foo']}
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[empty-bodies.test.sh::Empty do/done[L3]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109267530>
test_file = 'empty-bodies.test.sh'
test_case = TestCase(name='Empty do/done', script='while false; do\ndone\necho empty', assertions=[Assertion(type='stdout', value=...'], variant='OK'), Assertion(type='status', value=2, shells=['dash', 'bash'], variant='OK')], line_number=3, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty do/done (line 3)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'empty'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'empty\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           while false; do
E           done
E           echo empty
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[empty-bodies.test.sh::Empty then/fi[L17]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092676b0>
test_file = 'empty-bodies.test.sh'
test_case = TestCase(name='Empty then/fi', script='if foo; then\nfi\necho empty', assertions=[Assertion(type='stdout', value='empt...=['mksh'], variant='OK'), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=17, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty then/fi (line 17)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'empty'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'empty\n'
E           Expected stderr: None
E           Actual stderr:   'bash: foo: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           if foo; then
E           fi
E           echo empty
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::errexit for nonexistent command[L13]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109267830>
test_file = 'errexit.test.sh'
test_case = TestCase(name='errexit for nonexistent command', script='set -o errexit\nnonexistent__ZZ\necho done', assertions=[Asse...shells=None, variant=None), Assertion(type='status', value=127, shells=None, variant=None)], line_number=13, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: errexit for nonexistent command (line 13)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: nonexistent__ZZ: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           nonexistent__ZZ
E           echo done
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[errexit.test.sh::errexit with { }[L27]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092679b0>
test_file = 'errexit.test.sh'
test_case = TestCase(name='errexit with { }', script="# This aborts because it's not part of an if statement.\nset -o errexit\n{ e..., shells=None, variant=None), Assertion(type='status', value=1, shells=None, variant=None)], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: errexit with { } (line 27)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           # This aborts because it's not part of an if statement.
E           set -o errexit
E           { echo one; false; echo two; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[errexit.test.sh::More && ||[L73]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109267e30>
test_file = 'errexit.test.sh'
test_case = TestCase(name='More && ||', script="$SH -c 'set -e; false || { echo group; false; }; echo bad'\necho status=$?\necho\n...t', value='group\nstatus=1\n\nsubshell\nstatus=42\n\nstatus=1', shells=None, variant=None)], line_number=73, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: More && || (line 73)
E           
E           stdout mismatch:
E             expected: 'group\nstatus=1\n\nsubshell\nstatus=42\n\nstatus=1'
E             actual:   'status=1\n\nsubshell\nstatus=42\n\nstatus=1'
E           
E           Expected stdout: 'group\nstatus=1\n\nsubshell\nstatus=42\n\nstatus=1'
E           Actual stdout:   'status=1\n\nsubshell\nstatus=42\n\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: /bin/false: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c 'set -e; false || { echo group; false; }; echo bad'
E           echo status=$?
E           echo
E           
E           $SH -c 'set -e; false || ( echo subshell; exit 42 ); echo bad'
E           echo status=$?
E           echo
E           
E           # noforklast optimization
E           $SH -c 'set -e; false || /bin/false; echo bad'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::errexit and brace group { }[L105]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109267fb0>
test_file = 'errexit.test.sh'
test_case = TestCase(name='errexit and brace group { }', script='set -o errexit\n{ test no = yes && echo hi; }\necho status=$?', assertions=[Assertion(type='stdout', value='status=1', shells=None, variant=None)], line_number=105, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: errexit and brace group { } (line 105)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   ''
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           { test no = yes && echo hi; }
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::errexit with subshell[L164]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294470>
test_file = 'errexit.test.sh'
test_case = TestCase(name='errexit with subshell', script='set -o errexit\n( echo one; false; echo two; )\necho three', assertions...lls=None, variant=None), Assertion(type='stdout', value='one', shells=None, variant=None)], line_number=164, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: errexit with subshell (line 164)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           ( echo one; false; echo two; )
E           echo three
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::setting errexit in a subshell works but doesn't affect parent shell[L227]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294770>
test_file = 'errexit.test.sh'
test_case = TestCase(name="setting errexit in a subshell works but doesn't affect parent shell", script='( echo 1; false; echo 2; ...', assertions=[Assertion(type='stdout', value='1\n2\n3\n5\n6', shells=None, variant=None)], line_number=227, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: setting errexit in a subshell works but doesn't affect parent shell (line 227)
E           
E           stdout mismatch:
E             expected: '1\n2\n3\n5\n6'
E             actual:   ''
E           
E           Expected stdout: '1\n2\n3\n5\n6'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           ( echo 1; false; echo 2; set -o errexit; echo 3; false; echo 4; )
E           echo 5
E           false
E           echo 6
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::set errexit while it's ignored in a subshell (moot with strict_errexit)[L240]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294830>
test_file = 'errexit.test.sh'
test_case = TestCase(name="set errexit while it's ignored in a subshell (moot with strict_errexit)", script='set -o errexit\nif ( ...iant=None), Assertion(type='stdout', value='1\n2\n3\n4\n5\n6', shells=None, variant=None)], line_number=240, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set errexit while it's ignored in a subshell (moot with strict_errexit) (line 240)
E           
E           stdout mismatch:
E             expected: '1\n2\n3\n4\n5\n6'
E             actual:   ''
E           
E           Expected stdout: '1\n2\n3\n4\n5\n6'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           if ( echo 1; false; echo 2; set -o errexit; echo 3; false; echo 4 ); then
E             echo 5;
E           fi
E           echo 6  # This is executed because the subshell just returns false
E           false 
E           echo 7
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::background processes respect errexit[L282]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294a70>
test_file = 'errexit.test.sh'
test_case = TestCase(name='background processes respect errexit', script='set -o errexit\n{ echo one; false; echo two; exit 42; } ...lls=None, variant=None), Assertion(type='stdout', value='one', shells=None, variant=None)], line_number=282, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: background processes respect errexit (line 282)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           { echo one; false; echo two; exit 42; } &
E           wait $!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::pipeline process respects errexit[L291]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294b30>
test_file = 'errexit.test.sh'
test_case = TestCase(name='pipeline process respects errexit', script='set -o errexit\n# It is respected here.\n{ echo one; false;... variant=None), Assertion(type='stdout', value='one\n[three]', shells=None, variant=None)], line_number=291, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pipeline process respects errexit (line 291)
E           
E           stdout mismatch:
E             expected: 'one\n[three]'
E             actual:   ''
E           
E           Expected stdout: 'one\n[three]'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           # It is respected here.
E           { echo one; false; echo two; } | cat
E           
E           # Also respected here.
E           { echo three; echo four; } | while read line; do
E             echo "[$line]"
E             false
E           done
E           echo four
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::simple command / assign - redir failure DOES respect errexit[L308]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294bf0>
test_file = 'errexit.test.sh'
test_case = TestCase(name='simple command / assign - redir failure DOES respect errexit', script="$SH -c '\nset -o errexit\ntrue >...rtion(type='stdout', value='status=2\nstatus=2\nstatus=2', shells=['dash'], variant='OK')], line_number=308, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: simple command / assign - redir failure DOES respect errexit (line 308)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1\nstatus=1'
E             actual:   'builtin status=0\nstatus=0\nstatus=1\nassign status=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1\nstatus=1'
E           Actual stdout:   'builtin status=0\nstatus=0\nstatus=1\nassign status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: /bin/true: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c '
E           set -o errexit
E           true > /
E           echo builtin status=$?
E           '
E           echo status=$?
E           
E           $SH -c '
E           set -o errexit
E           /bin/true > /
E           echo extern status=$?
E           '
E           echo status=$?
E           
E           $SH -c '
E           set -o errexit
E           assign=foo > /
E           echo assign status=$?
E           '
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::bash atoms [[ (( - redir failure checked[L369]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294d70>
test_file = 'errexit.test.sh'
test_case = TestCase(name='bash atoms [[ (( - redir failure checked', script="# bash 5.2 fixed bash 4.4 bug: this is now checked\n...'ash'], variant='OK'), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=369, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bash atoms [[ (( - redir failure checked (line 369)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'dbracket status=0\nstatus=0\ndparen status=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'dbracket status=0\nstatus=0\ndparen status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash 5.2 fixed bash 4.4 bug: this is now checked
E           
E           case $SH in dash) exit ;; esac
E           
E           $SH -c '
E           set -o errexit
E           [[ x = x ]] > /
E           echo dbracket status=$?
E           '
E           echo status=$?
E           
E           $SH -c '
E           set -o errexit
E           (( 42 )) > /
E           echo dparen status=$?
E           '
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::brace group - redir failure checked[L402]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294e30>
test_file = 'errexit.test.sh'
test_case = TestCase(name='brace group - redir failure checked', script="# bash 5.2 fixed bash 4.4 bug: this is now checked\n\n# c...tion(type='stdout', value='status=1\nshould not get here', shells=['ash'], variant='BUG')], line_number=402, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: brace group - redir failure checked (line 402)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'status=0\nshould not get here'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'status=0\nshould not get here\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash 5.2 fixed bash 4.4 bug: this is now checked
E           
E           # case from
E           # https://lists.gnu.org/archive/html/bug-bash/2020-05/msg00066.html
E           
E           set -o errexit
E           
E           { cat ; } < not_exist.txt   
E           
E           echo status=$?
E           echo 'should not get here'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[errexit.test.sh::while loop - redirect failure checked[L433]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109294ef0>
test_file = 'errexit.test.sh'
test_case = TestCase(name='while loop - redirect failure checked', script="# bash 5.2 fixed bash 4.4 bug: this is now checked\n\n#...tion(type='stdout', value='status=1\nshould not get here', shells=['ash'], variant='BUG')], line_number=433, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: while loop - redirect failure checked (line 433)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'status=0\nshould not get here'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'status=0\nshould not get here\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash 5.2 fixed bash 4.4 bug: this is now checked
E           
E           # case from
E           # https://lists.gnu.org/archive/html/bug-bash/2020-05/msg00066.html
E           
E           set -o errexit
E           
E           while read line; do
E            echo $line
E           done < not_exist.txt   
E           
E           echo status=$?
E           echo 'should not get here'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[exit-status.test.sh::Truncating 'exit' status[L10]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092951f0>
test_file = 'exit-status.test.sh'
test_case = TestCase(name="Truncating 'exit' status", script="$SH -c 'exit 255'\necho status=$?\n\n$SH -c 'exit 256'\necho status=...='status=255\nstatus=0\nstatus=1\n===\nstatus=2\nstatus=2', shells=['dash'], variant='OK')], line_number=10, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Truncating 'exit' status (line 10)
E           
E           stdout mismatch:
E             expected: 'status=255\nstatus=0\nstatus=1\n===\nstatus=255\nstatus=254'
E             actual:   ''
E           
E           Expected stdout: 'status=255\nstatus=0\nstatus=1\n===\nstatus=255\nstatus=254'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   255
E           
E           Script:
E           ---
E           $SH -c 'exit 255'
E           echo status=$?
E           
E           $SH -c 'exit 256'
E           echo status=$?
E           
E           $SH -c 'exit 257'
E           echo status=$?
E           
E           echo ===
E           
E           $SH -c 'exit -1'
E           echo status=$?
E           
E           $SH -c 'exit -2'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[exit-status.test.sh::subshell OverflowError https://github.com/oilshell/oil/issues/996[L83]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109295370>
test_file = 'exit-status.test.sh'
test_case = TestCase(name='subshell OverflowError https://github.com/oilshell/oil/issues/996', script='# We have to capture stderr...-\nstatus=2\nreturn: can only\nstatus=2\nreturn: can only', shells=['bash'], variant='OK')], line_number=83, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: subshell OverflowError https://github.com/oilshell/oil/issues/996 (line 83)
E           
E           Execution error: return 255
E           
E           
E           Script:
E           ---
E           # We have to capture stderr here 
E           
E           filter_err() {
E             # check for bash/dash/mksh messages, and unwanted Python OverflowError
E             egrep -o 'Illegal number|bad number|return: can only|expected a small integer|OverflowError'
E             return 0
E           }
E           
E           # true; disables subshell optimization!
E           
E           # exit status too big, but integer isn't
E           $SH -c 'true; ( return 2147483647; )' 2>err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           # now integer is too big
E           $SH -c 'true; ( return 2147483648; )' 2> err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           # even bigger
E           $SH -c 'true; ( return 2147483649; )' 2> err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           echo
E           echo '--- negative ---'
E           
E           # negative vlaues
E           $SH -c 'true; ( return -2147483648; )' 2>err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           # negative vlaues
E           $SH -c 'true; ( return -2147483649; )' 2>err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           
E           # osh-cpp checks overflow, but osh-py doesn't
E           
E           
E           # mksh behaves similarly, uses '1' as its "bad status" status!
E           
E           
E           # dash is similar, but seems to reject negative numbers
E           
E           
E           # bash disallows return at top level
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[exit-status.test.sh::If subshell true[L263]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092955b0>
test_file = 'exit-status.test.sh'
test_case = TestCase(name='If subshell true', script='if `true`; then echo TRUE; else echo FALSE; fi', assertions=[Assertion(type=... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=263, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: If subshell true (line 263)
E           
E           stdout mismatch:
E             expected: 'TRUE'
E             actual:   'FALSE'
E           
E           Expected stdout: 'TRUE'
E           Actual stdout:   'FALSE\n'
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           if `true`; then echo TRUE; else echo FALSE; fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[exit-status.test.sh::Exit code when command sub evaluates to empty str, e.g. `false` (#2416)[L283]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092958b0>
test_file = 'exit-status.test.sh'
test_case = TestCase(name='Exit code when command sub evaluates to empty str, e.g. `false` (#2416)', script='# OSH had a bug here\...[Assertion(type='stdout', value='0\n1\n0\n1\n---\n0\n1\n0\n1', shells=None, variant=None)], line_number=283, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Exit code when command sub evaluates to empty str, e.g. `false` (#2416) (line 283)
E           
E           stdout mismatch:
E             expected: '0\n1\n0\n1\n---\n0\n1\n0\n1'
E             actual:   '1\n1\n1\n1\n---\n0\n1\n0\n1'
E           
E           Expected stdout: '0\n1\n0\n1\n---\n0\n1\n0\n1'
E           Actual stdout:   '1\n1\n1\n1\n---\n0\n1\n0\n1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\nbash: : command not found\nbash: : command not found\nbash: : command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # OSH had a bug here
E           `true`; echo $?
E           `false`; echo $?
E           $(true); echo $?
E           $(false); echo $?
E           echo ---
E           
E           # OSH and others agree on these
E           eval true; echo $?
E           eval false; echo $?
E           `echo true`; echo $?
E           `echo false`; echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[exit-status.test.sh::More test cases with empty argv[L309]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109295970>
test_file = 'exit-status.test.sh'
test_case = TestCase(name='More test cases with empty argv', script='true $(false)\necho status=$?\n\n$(exit 42)\necho status=$?\n...sertion(type='stdout', value='status=0\nstatus=42\nstatus=43', shells=None, variant=None)], line_number=309, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: More test cases with empty argv (line 309)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=42\nstatus=43'
E             actual:   'status=0\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=42\nstatus=43'
E           Actual stdout:   'status=0\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\nbash: : command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           true $(false)
E           echo status=$?
E           
E           $(exit 42)
E           echo status=$?
E           
E           $(exit 42) $(exit 43)
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-files.test.sh::Two adjacent alternations[L76]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109295df0>
test_file = 'extglob-files.test.sh'
test_case = TestCase(name='Two adjacent alternations', script='shopt -s extglob\nmkdir -p 2\ntouch 2/{aa,ab,ac,ba,bb,bc,ca,cb,cc}\...2/ab 2/ac 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac', shells=None, variant=None)], line_number=76, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Two adjacent alternations (line 76)
E           
E           stdout mismatch:
E             expected: '2/ab 2/ac 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac'
E             actual:   '2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac'
E           
E           Expected stdout: '2/ab 2/ac 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac'
E           Actual stdout:   '2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           mkdir -p 2
E           touch 2/{aa,ab,ac,ba,bb,bc,ca,cb,cc}
E           echo 2/!(b)@(b|c)
E           echo 2/!(b)?@(b|c)  # wildcard in between
E           echo 2/!(b)a@(b|c)  # constant in between
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-files.test.sh::Glob other punctuation chars (lexer mode)[L133]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092961b0>
test_file = 'extglob-files.test.sh'
test_case = TestCase(name='Glob other punctuation chars (lexer mode)', script="shopt -s extglob\nmkdir -p eg5\ncd eg5\ntouch __{aa...ype='stdout', value="['__#', '__&&', '__<>', '__aa', '__{}']", shells=None, variant=None)], line_number=133, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Glob other punctuation chars (lexer mode) (line 133)
E           
E           stdout mismatch:
E             expected: "['__#', '__&&', '__<>', '__aa', '__{}']"
E             actual:   "['@(__aa|__<>|__{}|__#|__&&|)']"
E           
E           Expected stdout: "['__#', '__&&', '__<>', '__aa', '__{}']"
E           Actual stdout:   "['@(__aa|__<>|__{}|__#|__&&|)']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           mkdir -p eg5
E           cd eg5
E           touch __{aa,'<>','{}','#','&&'}
E           argv.py @(__aa|'__<>'|__{}|__#|__&&|)
E           
E           # mksh sorts them differently
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-files.test.sh::Escaping of pipe (glibc bug, see demo/glibc_fnmatch.c)[L159]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109296330>
test_file = 'extglob-files.test.sh'
test_case = TestCase(name='Escaping of pipe (glibc bug, see demo/glibc_fnmatch.c)', script="shopt -s extglob\n\nmkdir -p extpipe\n...sertion(type='stdout', value="['__|', 'foo']\n['__|', 'foo']", shells=None, variant=None)], line_number=159, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Escaping of pipe (glibc bug, see demo/glibc_fnmatch.c) (line 159)
E           
E           stdout mismatch:
E             expected: "['__|', 'foo']\n['__|', 'foo']"
E             actual:   "['foo']\n['foo']"
E           
E           Expected stdout: "['__|', 'foo']\n['__|', 'foo']"
E           Actual stdout:   "['foo']\n['foo']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           
E           mkdir -p extpipe
E           cd extpipe
E           
E           touch '__|' foo
E           argv.py @('foo'|__\||bar)
E           argv.py @('foo'|'__|'|bar)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-files.test.sh::Extended glob in same word as array[L224]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109296570>
test_file = 'extglob-files.test.sh'
test_case = TestCase(name='Extended glob in same word as array', script='shopt -s extglob\nmkdir -p eg10\ncd eg10\n\ntouch {\'a b ...['osh'], variant='N-I'), Assertion(type='status', value=1, shells=['osh'], variant='N-I')], line_number=224, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Extended glob in same word as array (line 224)
E           
E           stdout mismatch:
E             expected: "['a b', 'c']\n['star', 'glob', 'a b c.py']\n['star', 'extglob', 'a b c.cc', 'a b c.py']\n['at', 'extglob', 'a b', 'cee.cc', 'cee.py']"
E             actual:   "['a b', 'c']\n['star', 'glob', 'a b c*.py']\n['star', 'extglob', 'a b c*@(.py|cc)']\n['at', 'extglob', 'a b', 'c*@(.py|cc)']"
E           
E           Expected stdout: "['a b', 'c']\n['star', 'glob', 'a b c.py']\n['star', 'extglob', 'a b c.cc', 'a b c.py']\n['at', 'extglob', 'a b', 'cee.cc', 'cee.py']"
E           Actual stdout:   "['a b', 'c']\n['star', 'glob', 'a b c*.py']\n['star', 'extglob', 'a b c*@(.py|cc)']\n['at', 'extglob', 'a b', 'c*@(.py|cc)']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           mkdir -p eg10
E           cd eg10
E           
E           touch {'a b c',bee,cee}.{py,cc}
E           set -- 'a b' 'c'
E           
E           argv.py "$@"
E           
E           # This works!
E           argv.py star glob "$*"*.py
E           argv.py star extglob "$*"*@(.py|cc)
E           
E           # Hm this actually still works!  the first two parts are literal.  And then
E           # there's something like the simple_word_eval algorithm on the rest.  Gah.
E           argv.py at extglob "$@"*@(.py|cc)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-files.test.sh::Extended glob with word splitting[L255]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109296630>
test_file = 'extglob-files.test.sh'
test_case = TestCase(name='Extended glob with word splitting', script="shopt -s extglob\nmkdir -p 3\ncd 3\n\nx='a b'\ntouch bar.{c...=None), Assertion(type='stdout', value="['a b*.@(cc|h)']", shells=['osh'], variant='N-I')], line_number=255, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Extended glob with word splitting (line 255)
E           
E           stdout mismatch:
E             expected: "['a', 'bar.cc', 'bar.h']"
E             actual:   "['a', 'b*.@(cc|h)']"
E           
E           Expected stdout: "['a', 'bar.cc', 'bar.h']"
E           Actual stdout:   "['a', 'b*.@(cc|h)']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           mkdir -p 3
E           cd 3
E           
E           x='a b'
E           touch bar.{cc,h}
E           
E           # OSH may disallow splitting when there's an extended glob
E           argv.py $x*.@(cc|h)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-files.test.sh::In Array Literal and for loop[L273]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092966f0>
test_file = 'extglob-files.test.sh'
test_case = TestCase(name='In Array Literal and for loop', script='shopt -s extglob\nmkdir -p eg11\ncd eg11\ntouch {foo,bar,spam}....type='stdout', value='bar.py\nfoo.py\n---\nzzz bar.py foo.py', shells=None, variant=None)], line_number=273, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: In Array Literal and for loop (line 273)
E           
E           stdout mismatch:
E             expected: 'bar.py\nfoo.py\n---\nzzz bar.py foo.py'
E             actual:   'bar.py\nfoo.py\n---\nzzz @(fo*|bar).py'
E           
E           Expected stdout: 'bar.py\nfoo.py\n---\nzzz bar.py foo.py'
E           Actual stdout:   'bar.py\nfoo.py\n---\nzzz @(fo*|bar).py\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           mkdir -p eg11
E           cd eg11
E           touch {foo,bar,spam}.py
E           for x in @(fo*|bar).py; do
E             echo $x
E           done
E           
E           echo ---
E           declare -a A
E           A=(zzz @(fo*|bar).py)
E           echo "${A[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[extglob-files.test.sh::no match[L305]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109296870>
test_file = 'extglob-files.test.sh'
test_case = TestCase(name='no match', script="shopt -s extglob\necho @(__nope__)\n\n# OSH has glob quoting here\necho @(__nope__*|..., value='@(__nope__)\n@(__nope__*|__nope__?|*|?|[:alpha:]||)', shells=None, variant=None)], line_number=305, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: no match (line 305)
E           
E           stdout mismatch:
E             expected: '@(__nope__)\n@(__nope__*|__nope__?|*|?|[:alpha:]||)'
E             actual:   '@(__nope__)\n_keep'
E           
E           Expected stdout: '@(__nope__)\n@(__nope__*|__nope__?|*|?|[:alpha:]||)'
E           Actual stdout:   '@(__nope__)\n_keep\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           echo @(__nope__)
E           
E           # OSH has glob quoting here
E           echo @(__nope__*|__nope__?|'*'|'?'|'[:alpha:]'|'|')
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-match.test.sh::case with extglob[L219]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092979b0>
test_file = 'extglob-match.test.sh'
test_case = TestCase(name='case with extglob', script='shopt -s extglob\nfor word in --help --verbose --unmatched -- -zxzx -; do\n...assertions=[Assertion(type='stdout', value='A\nA\nU\nB\nC\nD', shells=None, variant=None)], line_number=219, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: case with extglob (line 219)
E           
E           stdout mismatch:
E             expected: 'A\nA\nU\nB\nC\nD'
E             actual:   ''
E           
E           Expected stdout: 'A\nA\nU\nB\nC\nD'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           for word in --help --verbose --unmatched -- -zxzx -; do
E             case $word in
E               --@(help|verbose) )
E                 echo A
E                 continue
E                 ;;
E               ( --?(b|c) )
E                 echo B
E                 continue
E                 ;;
E               ( -+(x|z) )
E                 echo C
E                 continue
E                 ;;
E               ( -*(x|z) )
E                 echo D
E                 continue
E                 ;;
E               *)
E                 echo U
E                 continue
E                 ;;
E             esac
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-match.test.sh::[[ $x == !($str) ]][L254]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109297a70>
test_file = 'extglob-match.test.sh'
test_case = TestCase(name='[[ $x == !($str) ]]', script="shopt -s extglob\nempty=''\nstr='x'\n[[ $empty == !($str) ]] && echo TRUE...SE", assertions=[Assertion(type='stdout', value='TRUE\nFALSE', shells=None, variant=None)], line_number=254, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ $x == !($str) ]] (line 254)
E           
E           Execution error: Expected ']]' to close conditional at line 4, column 15
E           
E           
E           Script:
E           ---
E           shopt -s extglob
E           empty=''
E           str='x'
E           [[ $empty == !($str) ]] && echo TRUE  # test glob match
E           [[ $str == !($str) ]]   || echo FALSE
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-match.test.sh::Turning extglob on changes the meaning of [[ !(str) ]] in bash[L265]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109297b30>
test_file = 'extglob-match.test.sh'
test_case = TestCase(name='Turning extglob on changes the meaning of [[ !(str) ]] in bash', script="empty=''\nstr='x'\n[[ !($empty...Assertion(type='stdout', value='TRUE\nTRUE\nTRUE', shells=['mksh', 'ksh'], variant='N-I')], line_number=265, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Turning extglob on changes the meaning of [[ !(str) ]] in bash (line 265)
E           
E           stdout mismatch:
E             expected: 'TRUE\nFALSE\nTRUE\nTRUE'
E             actual:   'TRUE\nFALSE\nTRUE'
E           
E           Expected stdout: 'TRUE\nFALSE\nTRUE\nTRUE'
E           Actual stdout:   'TRUE\nFALSE\nTRUE\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           empty=''
E           str='x'
E           [[ !($empty) ]]  && echo TRUE   # test if $empty is empty
E           [[ !($str) ]]    || echo FALSE  # test if $str is empty
E           shopt -s extglob  # mksh doesn't have this
E           [[ !($empty) ]]  && echo TRUE   # negated glob
E           [[ !($str) ]]    && echo TRUE   # negated glob
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[extglob-match.test.sh::With extglob on, !($str) on the left or right of == has different meanings[L285]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109297bf0>
test_file = 'extglob-match.test.sh'
test_case = TestCase(name='With extglob on, !($str) on the left or right of == has different meanings', script="shopt -s extglob\n...lob match", assertions=[Assertion(type='stdout', value='TRUE', shells=None, variant=None)], line_number=285, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: With extglob on, !($str) on the left or right of == has different meanings (line 285)
E           
E           Execution error: Expected ']]' to close conditional at line 3, column 10
E           
E           
E           Script:
E           ---
E           shopt -s extglob
E           str='x'
E           [[ 1 == !($str) ]]  && echo TRUE   # glob match
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[fatal-errors.test.sh::Unrecoverable: divide by zero in redirect word[L8]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b4170>
test_file = 'fatal-errors.test.sh'
test_case = TestCase(name='Unrecoverable: divide by zero in redirect word', script="$SH -c '\necho hi > file$(( 42 / 0 )) in\necho... Assertion(type='stdout', value='outside=1\n---\noutside=0', shells=['zsh'], variant='BUG')], line_number=8, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unrecoverable: divide by zero in redirect word (line 8)
E           
E           stdout mismatch:
E             expected: "inside=1\noutside=0\n## END:\n\n\n#### Unrecoverable: divide by zero in conditional word\n\n$SH -c '\nif test foo$(( 42 / 0 )) = foo; then\n  echo true\nelse\n  echo false\nfi\necho inside=$?\n'\necho outside=$?\n\necho ---\n\n$SH -c '\nif test foo$(( 42 / 0 )) = foo; then\n  echo true\nfi\necho inside=$?\n'\necho outside=$?"
E             actual:   ''
E           
E           Expected stdout: "inside=1\noutside=0\n## END:\n\n\n#### Unrecoverable: divide by zero in conditional word\n\n$SH -c '\nif test foo$(( 42 / 0 )) = foo; then\n  echo true\nelse\n  echo false\nfi\necho inside=$?\n'\necho outside=$?\n\necho ---\n\n$SH -c '\nif test foo$(( 42 / 0 )) = foo; then\n  echo true\nfi\necho inside=$?\n'\necho outside=$?\n"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: division by 0\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           $SH -c '
E           echo hi > file$(( 42 / 0 )) in
E           echo inside=$?
E           '
E           echo outside=$?
E           
E           
E           
E           # bash makes the command fail
E           
E           
E           # bash makes the command fail
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[fatal-errors.test.sh::Unrecoverable: divide by zero in case[L83]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b4230>
test_file = 'fatal-errors.test.sh'
test_case = TestCase(name='Unrecoverable: divide by zero in case', script="$SH -c '\ncase $(( 42 / 0 )) in\n  (*) echo hi ;;\nesac...Assertion(type='stdout', value='outside=0\n---\noutside=0', shells=['zsh'], variant='BUG')], line_number=83, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unrecoverable: divide by zero in case (line 83)
E           
E           Execution error: Expected pattern in case item at line 1, column 22
E           
E           
E           Script:
E           ---
E           $SH -c '
E           case $(( 42 / 0 )) in
E             (*) echo hi ;;
E           esac
E           echo inside=$?
E           '
E           echo outside=$?
E           
E           echo ---
E           
E           $SH -c '
E           case foo in
E             ( $(( 42 / 0 )) )
E               echo hi
E               ;;
E           esac
E           echo inside=$?
E           '
E           echo outside=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[fatal-errors.test.sh::Unrecoverable: ${undef?message}[L132]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b42f0>
test_file = 'fatal-errors.test.sh'
test_case = TestCase(name='Unrecoverable: ${undef?message}', script="$SH -c '\necho ${undef?message}\necho inside=$?\n'\necho outs...Assertion(type='stdout', value='outside=127\noutside=127', shells=['bash'], variant='OK')], line_number=132, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unrecoverable: ${undef?message} (line 132)
E           
E           stdout mismatch:
E             expected: 'outside=127\noutside=127'
E             actual:   ''
E           
E           Expected stdout: 'outside=127\noutside=127'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: message\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           $SH -c '
E           echo ${undef?message}
E           echo inside=$?
E           '
E           echo outside=$?
E           
E           $SH -c '
E           case ${undef?message} in 
E             (*) echo hi ;;
E           esac
E           echo inside=$?
E           '
E           echo outside=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[fatal-errors.test.sh::${undef} with nounset[L161]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b43b0>
test_file = 'fatal-errors.test.sh'
test_case = TestCase(name='${undef} with nounset', script="$SH -c '\nset -o nounset\ncase ${undef} in \n  (*) echo hi ;;\nesac\nec...variant='OK'), Assertion(type='stdout', value='outside=0', shells=['zsh'], variant='BUG')], line_number=161, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${undef} with nounset (line 161)
E           
E           Execution error: Expected pattern in case item at line 1, column 33
E           
E           
E           Script:
E           ---
E           $SH -c '
E           set -o nounset
E           case ${undef} in 
E             (*) echo hi ;;
E           esac
E           echo inside=$?
E           '
E           echo outside=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[for-expr.test.sh::Accepts { } syntax too[L43]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b45f0>
test_file = 'for-expr.test.sh'
test_case = TestCase(name='Accepts { } syntax too', script='for ((a=1; a <= 3; a++)) {\n  echo $a\n}', assertions=[Assertion(type='stdout', value='1\n2\n3', shells=None, variant=None)], line_number=43, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Accepts { } syntax too (line 43)
E           
E           Execution error: Expected 'do' in for loop at line 1, column 26
E           
E           
E           Script:
E           ---
E           for ((a=1; a <= 3; a++)) {
E             echo $a
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[for-expr.test.sh::Arith lexer mode[L96]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b48f0>
test_file = 'for-expr.test.sh'
test_case = TestCase(name='Arith lexer mode', script='# bash is lenient; zsh disagrees\n\nfor ((i = \'3\';  i < \'5\';  ++i)); do ...s=['zsh'], variant='OK'), Assertion(type='stdout', value='', shells=['zsh'], variant='OK')], line_number=96, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Arith lexer mode (line 96)
E           
E           stdout mismatch:
E             expected: '3\n4\n3\n4\n3\n4\n3\n4'
E             actual:   '3\n4\n3\n4\n3\n4'
E           
E           Expected stdout: '3\n4\n3\n4\n3\n4\n3\n4'
E           Actual stdout:   '3\n4\n3\n4\n3\n4\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash is lenient; zsh disagrees
E           
E           for ((i = '3';  i < '5';  ++i)); do echo $i; done
E           for ((i = "3";  i < "5";  ++i)); do echo $i; done
E           for ((i = $'3'; i < $'5'; ++i)); do echo $i; done
E           for ((i = $"3"; i < $"5"; ++i)); do echo $i; done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[for-expr.test.sh::Condition that's greater than 32 bits[L156]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b4a70>
test_file = 'for-expr.test.sh'
test_case = TestCase(name="Condition that's greater than 32 bits", script='iters=0\n\nfor ((i = 1 << 32; i; ++i)); do\n  echo $i\n...='4294967296\n4294967297\n4294967298\n4294967299\n4294967300', shells=None, variant=None)], line_number=156, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Condition that's greater than 32 bits (line 156)
E           
E           Execution error: Expected 'done' to close for loop at line 4, column 3
E           
E           
E           Script:
E           ---
E           iters=0
E           
E           for ((i = 1 << 32; i; ++i)); do
E             echo $i
E             iters=$(( iters + 1 ))
E             if test $iters -eq 5; then
E               break
E             fi
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[func-parsing.test.sh::Hard case, function with } token in it[L43]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b4fb0>
test_file = 'func-parsing.test.sh'
test_case = TestCase(name='Hard case, function with } token in it', script='rbrace() { echo }; }; rbrace', assertions=[Assertion(type='stdout', value='}', shells=None, variant=None)], line_number=43, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Hard case, function with } token in it (line 43)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 20
E           
E           
E           Script:
E           ---
E           rbrace() { echo }; }; rbrace
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[func-parsing.test.sh::Function name with $[L66]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b51f0>
test_file = 'func-parsing.test.sh'
test_case = TestCase(name='Function name with $', script='$foo-bar() { ls ; }', assertions=[Assertion(type='status', value=2, shel...e, variant=None), Assertion(type='status', value=1, shells=['bash', 'mksh'], variant='OK')], line_number=66, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Function name with $ (line 66)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           $foo-bar() { ls ; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[func-parsing.test.sh::Function name with command sub[L71]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b52b0>
test_file = 'func-parsing.test.sh'
test_case = TestCase(name='Function name with command sub', script='foo-$(echo hi)() { ls ; }', assertions=[Assertion(type='status...e, variant=None), Assertion(type='status', value=1, shells=['bash', 'mksh'], variant='OK')], line_number=71, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Function name with command sub (line 71)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           foo-$(echo hi)() { ls ; }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob-bash.test.sh::shopt -s failglob in loop context[L31]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b57f0>
test_file = 'glob-bash.test.sh'
test_case = TestCase(name='shopt -s failglob in loop context', script='for x in *.ZZ; do echo $x; done\necho status=$?\nshopt -s f...t', value='*.ZZ\nstatus=0\n*.ZZ\nstatus=0', shells=['dash', 'mksh', 'ash'], variant='N-I')], line_number=31, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -s failglob in loop context (line 31)
E           
E           stdout mismatch:
E             expected: '*.ZZ\nstatus=0\nstatus=1'
E             actual:   '*.ZZ\nstatus=0\nstatus=0'
E           
E           Expected stdout: '*.ZZ\nstatus=0\nstatus=1'
E           Actual stdout:   '*.ZZ\nstatus=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           for x in *.ZZ; do echo $x; done
E           echo status=$?
E           shopt -s failglob
E           for x in *.ZZ; do echo $x; done
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob-bash.test.sh::shopt -s failglob in array literal context[L49]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b58b0>
test_file = 'glob-bash.test.sh'
test_case = TestCase(name='shopt -s failglob in array literal context', script='myarr=(*.ZZ)\necho "${myarr[@]}"\nshopt -s failglo..., variant='N-I'), Assertion(type='status', value=2, shells=['dash', 'ash'], variant='N-I')], line_number=49, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -s failglob in array literal context (line 49)
E           
E           stdout mismatch:
E             expected: '*.ZZ\nstatus=1'
E             actual:   '*.ZZ\nstatus=0'
E           
E           Expected stdout: '*.ZZ\nstatus=1'
E           Actual stdout:   '*.ZZ\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           myarr=(*.ZZ)
E           echo "${myarr[@]}"
E           shopt -s failglob
E           myarr=(*.ZZ)
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob-bash.test.sh::shopt -s failglob exits properly in loop context with set -e[L81]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b5a30>
test_file = 'glob-bash.test.sh'
test_case = TestCase(name='shopt -s failglob exits properly in loop context with set -e', script='set -e\nfor x in *.ZZ; do echo $...tion(type='stdout', value='*.ZZ\nstatus=0', shells=['dash', 'mksh', 'ash'], variant='N-I')], line_number=81, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -s failglob exits properly in loop context with set -e (line 81)
E           
E           stdout mismatch:
E             expected: '*.ZZ\nstatus=0'
E             actual:   '*.ZZ\nstatus=0\nstatus=0'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '*.ZZ\nstatus=0'
E           Actual stdout:   '*.ZZ\nstatus=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -e
E           for x in *.ZZ; do echo $x; done
E           echo status=$?
E           
E           shopt -s failglob
E           for x in *.ZZ; do echo $x; done
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob-bash.test.sh::shopt -s failglob behavior on single line with semicolon[L102]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b5af0>
test_file = 'glob-bash.test.sh'
test_case = TestCase(name='shopt -s failglob behavior on single line with semicolon', script="# bash behaves differently when comm...', value='*.ZZ\nstatus=0\n*.ZZ\nstatus=0', shells=['dash', 'mksh', 'ash'], variant='N-I')], line_number=102, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -s failglob behavior on single line with semicolon (line 102)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: no match: *.ZZ\nbash: no match: *.ZZ\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash behaves differently when commands are separated by a semicolon than when
E           # separated by a newline. This behavior doesn't make sense or seem to be
E           # intentional, so osh does not mimic it.
E           
E           shopt -s failglob
E           echo *.ZZ; echo status=$? # bash doesn't execute the second part!
E           echo *.ZZ
E           echo status=$? # bash executes this
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::glob can expand to command and arg[L37]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b61b0>
test_file = 'glob.test.sh'
test_case = TestCase(name='glob can expand to command and arg', script='cd $REPO_ROOT\nspec/testdata/echo.s[hz]', assertions=[Assertion(type='stdout', value='spec/testdata/echo.sz', shells=None, variant=None)], line_number=37, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: glob can expand to command and arg (line 37)
E           
E           stdout mismatch:
E             expected: 'spec/testdata/echo.sz'
E             actual:   ''
E           
E           Expected stdout: 'spec/testdata/echo.sz'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: spec/testdata/echo.s[hz]: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           spec/testdata/echo.s[hz]
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::glob after var expansion[L43]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b6270>
test_file = 'glob.test.sh'
test_case = TestCase(name='glob after var expansion', script='touch _tmp/a.A _tmp/aa.A _tmp/b.B\nf="_tmp/*.A"\ng="$f _tmp/*.B"\nec...=[Assertion(type='stdout', value='_tmp/a.A _tmp/aa.A _tmp/b.B', shells=None, variant=None)], line_number=43, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: glob after var expansion (line 43)
E           
E           stdout mismatch:
E             expected: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E             actual:   '_tmp/*.A _tmp/*.B'
E           
E           Expected stdout: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E           Actual stdout:   '_tmp/*.A _tmp/*.B\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/a.A _tmp/aa.A _tmp/b.B
E           f="_tmp/*.A"
E           g="$f _tmp/*.B"
E           echo $g
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::store literal globs in array then expand[L76]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b6630>
test_file = 'glob.test.sh'
test_case = TestCase(name='store literal globs in array then expand', script='touch _tmp/a.A _tmp/aa.A _tmp/b.B\ng=("_tmp/*.A" "_t..., variant='N-I'), Assertion(type='status', value=2, shells=['dash', 'ash'], variant='N-I')], line_number=76, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: store literal globs in array then expand (line 76)
E           
E           stdout mismatch:
E             expected: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E             actual:   '_tmp/*.A _tmp/*.B'
E           
E           Expected stdout: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E           Actual stdout:   '_tmp/*.A _tmp/*.B\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/a.A _tmp/aa.A _tmp/b.B
E           g=("_tmp/*.A" "_tmp/*.B")
E           echo ${g[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[glob.test.sh::glob inside array[L84]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b66f0>
test_file = 'glob.test.sh'
test_case = TestCase(name='glob inside array', script='touch _tmp/a.A _tmp/aa.A _tmp/b.B\ng=(_tmp/*.A _tmp/*.B)\necho "${g[@]}"', ..., variant='N-I'), Assertion(type='status', value=2, shells=['dash', 'ash'], variant='N-I')], line_number=84, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: glob inside array (line 84)
E           
E           stdout mismatch:
E             expected: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E             actual:   '_tmp/*.A _tmp/*.B'
E           
E           Expected stdout: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E           Actual stdout:   '_tmp/*.A _tmp/*.B\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/a.A _tmp/aa.A _tmp/b.B
E           g=(_tmp/*.A _tmp/*.B)
E           echo "${g[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::glob with escaped - in char class[L92]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b67b0>
test_file = 'glob.test.sh'
test_case = TestCase(name='glob with escaped - in char class', script='touch _tmp/foo.-\ntouch _tmp/c.C\necho _tmp/*.[C-D] _tmp/*....[Assertion(type='stdout', value='_tmp/c.C _tmp/c.C _tmp/foo.-', shells=None, variant=None)], line_number=92, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: glob with escaped - in char class (line 92)
E           
E           stdout mismatch:
E             expected: '_tmp/c.C _tmp/c.C _tmp/foo.-'
E             actual:   '_tmp/c.C _tmp/c.C'
E           
E           Expected stdout: '_tmp/c.C _tmp/c.C _tmp/foo.-'
E           Actual stdout:   '_tmp/c.C _tmp/c.C\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/foo.-
E           touch _tmp/c.C
E           echo _tmp/*.[C-D] _tmp/*.[C\-D]
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::glob with char class expression[L98]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b6870>
test_file = 'glob.test.sh'
test_case = TestCase(name='glob with char class expression', script="# note: mksh doesn't support [[:punct:]] ?\ntouch _tmp/e.E _t...ne), Assertion(type='stdout', value='_tmp/*.[[:punct:]E]', shells=['mksh'], variant='BUG')], line_number=98, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: glob with char class expression (line 98)
E           
E           stdout mismatch:
E             expected: '_tmp/e.E _tmp/foo.-'
E             actual:   '_tmp/*.[[:punct:]E]'
E           
E           Expected stdout: '_tmp/e.E _tmp/foo.-'
E           Actual stdout:   '_tmp/*.[[:punct:]E]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # note: mksh doesn't support [[:punct:]] ?
E           touch _tmp/e.E _tmp/foo.-
E           echo _tmp/*.[[:punct:]E]
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[glob.test.sh::glob escaped[L111]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b69f0>
test_file = 'glob.test.sh'
test_case = TestCase(name='glob escaped', script="# - mksh doesn't support [[:punct:]] ?\n# - python shell fails because \\[ not s...ssertions=[Assertion(type='stdout', value='_tmp/[abc] _tmp/?', shells=None, variant=None)], line_number=111, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: glob escaped (line 111)
E           
E           stdout mismatch:
E             expected: '_tmp/[abc] _tmp/?'
E             actual:   '_tmp/? _tmp/?'
E           
E           Expected stdout: '_tmp/[abc] _tmp/?'
E           Actual stdout:   '_tmp/? _tmp/?\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # - mksh doesn't support [[:punct:]] ?
E           # - python shell fails because \[ not supported!
E           touch _tmp/\[abc\] _tmp/\?
E           echo _tmp/\[???\] _tmp/\?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_______ TestBashSpecTests.test_spec_case[glob.test.sh::: escaped[L118]] ________

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b6ab0>
test_file = 'glob.test.sh'
test_case = TestCase(name=': escaped', script='touch _tmp/foo.-\necho _tmp/*.[[:punct:]] _tmp/*.[[:punct\\:]]', assertions=[Assert...tion(type='stdout', value='_tmp/foo.- _tmp/foo.-', shells=['bash', 'ash'], variant='BUG')], line_number=118, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: : escaped (line 118)
E           
E           stdout mismatch:
E             expected: '_tmp/foo.- _tmp/foo.-'
E             actual:   '_tmp/*.[[:punct:]] _tmp/*.[[:punct:]]'
E           
E           Expected stdout: '_tmp/foo.- _tmp/foo.-'
E           Actual stdout:   '_tmp/*.[[:punct:]] _tmp/*.[[:punct:]]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/foo.-
E           echo _tmp/*.[[:punct:]] _tmp/*.[[:punct\:]]
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[glob.test.sh::set -o noglob[L153]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b6db0>
test_file = 'glob.test.sh'
test_case = TestCase(name='set -o noglob', script='cd $REPO_ROOT\ntouch _tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz\necho _tmp/spec-tmp/...e='_tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz\n_tmp/spec-tmp/*.zz', shells=None, variant=None)], line_number=153, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set -o noglob (line 153)
E           
E           stdout mismatch:
E             expected: '_tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz\n_tmp/spec-tmp/*.zz'
E             actual:   '_tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz\n_tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz'
E           
E           Expected stdout: '_tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz\n_tmp/spec-tmp/*.zz'
E           Actual stdout:   '_tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz\n_tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: noglob: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           touch _tmp/spec-tmp/a.zz _tmp/spec-tmp/b.zz
E           echo _tmp/spec-tmp/*.zz
E           set -o noglob
E           echo _tmp/spec-tmp/*.zz
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::Splitting/Globbing doesn't happen on local assignment[L172]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b6f30>
test_file = 'glob.test.sh'
test_case = TestCase(name="Splitting/Globbing doesn't happen on local assignment", script='cd $REPO_ROOT\n\nf() {\n  # Dash splits...dash'], variant='BUG'), Assertion(type='status', value=2, shells=['dash'], variant='BUG')], line_number=172, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Splitting/Globbing doesn't happen on local assignment (line 172)
E           
E           stdout mismatch:
E             expected: 'void *'
E             actual:   'void'
E           
E           Expected stdout: 'void *'
E           Actual stdout:   'void\n'
E           Expected stderr: None
E           Actual stderr:   "bash: local: '*': not a valid identifier\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           
E           f() {
E             # Dash splits words and globs before handing it to the 'local' builtin.  But
E             # ash doesn't!
E             local foo=$1
E             echo "$foo"
E           }
E           f 'void *'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::Glob of unescaped [[] and []][L186]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b6ff0>
test_file = 'glob.test.sh'
test_case = TestCase(name='Glob of unescaped [[] and []]', script='touch $TMP/[ $TMP/]\ncd $TMP\necho [\\[z] [\\]z]  # the right w...epted', assertions=[Assertion(type='stdout', value='[ ]\n[ ]', shells=None, variant=None)], line_number=186, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Glob of unescaped [[] and []] (line 186)
E           
E           Execution error: Expected ']]' to close conditional at line 4, column 11
E           
E           
E           Script:
E           ---
E           touch $TMP/[ $TMP/]
E           cd $TMP
E           echo [\[z] [\]z]  # the right way to do it
E           echo [[z] []z]    # also accepted
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::Glob of negated unescaped [[] and []][L196]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b70b0>
test_file = 'glob.test.sh'
test_case = TestCase(name='Glob of negated unescaped [[] and []]', script='# osh does this "correctly" because it defers to libc!\...e='stdout', value='_[^[z] _[^]z]\n_[^[z] _[^]z]', shells=['dash', 'mksh'], variant='BUG')], line_number=196, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Glob of negated unescaped [[] and []] (line 196)
E           
E           stdout mismatch:
E             expected: '_G _G\n_G _G'
E             actual:   '_[^[z] _[^]z]\n_[^[z] _[^]z]'
E           
E           Expected stdout: '_G _G\n_G _G'
E           Actual stdout:   '_[^[z] _[^]z]\n_[^[z] _[^]z]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # osh does this "correctly" because it defers to libc!
E           touch $TMP/_G
E           cd $TMP
E           echo _[^\[z] _[^\]z]  # the right way to do it
E           echo _[^[z] _[^]z]    # also accepted
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::Glob ordering respects LC_COLLATE (zsh respects this too)[L262]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b73b0>
test_file = 'glob.test.sh'
test_case = TestCase(name='Glob ordering respects LC_COLLATE (zsh respects this too)', script="# test/spec-common.sh sets LC_ALL=C...hello-test.sh hello.py hello_preamble.sh', shells=['dash', 'mksh', 'ash'], variant='N-I')], line_number=262, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Glob ordering respects LC_COLLATE (zsh respects this too) (line 262)
E           
E           stdout mismatch:
E             expected: 'hello hello-test.sh hello.py hello_preamble.sh\nhello hello_preamble.sh hello.py hello-test.sh\nhello hello_preamble.sh hello.py hello-test.sh'
E             actual:   'hello hello-test.sh hello.py hello_preamble.sh\nhello hello-test.sh hello.py hello_preamble.sh\nhello hello-test.sh hello.py hello_preamble.sh'
E           
E           Expected stdout: 'hello hello-test.sh hello.py hello_preamble.sh\nhello hello_preamble.sh hello.py hello-test.sh\nhello hello_preamble.sh hello.py hello-test.sh'
E           Actual stdout:   'hello hello-test.sh hello.py hello_preamble.sh\nhello hello-test.sh hello.py hello_preamble.sh\nhello hello-test.sh hello.py hello_preamble.sh\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # test/spec-common.sh sets LC_ALL=C.UTF_8
E           unset LC_ALL
E           
E           touch hello hello.py hello_preamble.sh hello-test.sh
E           echo h*
E           
E           # bash - hello_preamble.h comes first
E           # But ord('_') == 95 
E           #     ord('-') == 45
E           
E           # https://serverfault.com/questions/122737/in-bash-are-wildcard-expansions-guaranteed-to-be-in-order
E           
E           #LC_COLLATE=C.UTF-8
E           LC_COLLATE=en_US.UTF-8  # en_US is necessary
E           echo h*
E           
E           LC_COLLATE=en_US.UTF-8 $SH -c 'echo h*'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::\\ in unquoted substitutions does not match a backslash[L296]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7470>
test_file = 'glob.test.sh'
test_case = TestCase(name='\\ in unquoted substitutions does not match a backslash', script='mkdir x\ntouch \\\n  x/test.ifs.\\\\....\\\\a.txt\']\n[\'x/test.ifs.\\\\b.txt\']', shells=['mksh', 'ksh', 'yash'], variant='BUG')], line_number=296, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \ in unquoted substitutions does not match a backslash (line 296)
E           
E           stdout mismatch:
E             expected: '[\'x/*\\\\*.txt\']\n["x/test.ifs.\'.txt"]\n[\'x/test.ifs.a.txt\']\n[\'x/test.ifs.\\\\b.txt\']'
E             actual:   '[\'x/*\\\\*.txt\']\n["x/*\\\\\'.txt"]\n[\'x/*\\\\a.txt\']\n[\'x/*\\\\b.txt\']'
E           
E           Expected stdout: '[\'x/*\\\\*.txt\']\n["x/test.ifs.\'.txt"]\n[\'x/test.ifs.a.txt\']\n[\'x/test.ifs.\\\\b.txt\']'
E           Actual stdout:   '[\'x/*\\\\*.txt\']\n["x/*\\\\\'.txt"]\n[\'x/*\\\\a.txt\']\n[\'x/*\\\\b.txt\']\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir x
E           touch \
E             x/test.ifs.\\.txt \
E             x/test.ifs.\'.txt \
E             x/test.ifs.a.txt \
E             x/test.ifs.\\b.txt
E           
E           v="*\\*.txt"
E           argv.py x/$v
E           
E           v="*\'.txt"
E           argv.py x/$v
E           
E           v='*\a.txt'
E           argv.py x/$v
E           
E           v='*\b.txt'
E           argv.py x/$v
E           
E           
E           # 3 shells treat \ in unquoted substitution $v as literal \
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::\\ in unquoted substitutions escapes globchars[L365]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7770>
test_file = 'glob.test.sh'
test_case = TestCase(name='\\ in unquoted substitutions escapes globchars', script='mkdir x\ntouch \\\n  \'x/test.ifs.\\.txt\' \\\....ifs.\\\\.txt']\n['x/test.ifs.\\\\.txt']", shells=['mksh', 'ksh', 'yash'], variant='BUG')], line_number=365, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \ in unquoted substitutions escapes globchars (line 365)
E           
E           stdout mismatch:
E             expected: "['x/test.ifs.*.txt']\n['x/test.ifs.*.txt']\n['x/test.ifs.*.txt']"
E             actual:   '[\'x/test.ifs.\\\\.txt\']\n[\'x/*\\\\"*.txt\']\n[\'x/*\\\\"*.txt\']'
E           
E           Expected stdout: "['x/test.ifs.*.txt']\n['x/test.ifs.*.txt']\n['x/test.ifs.*.txt']"
E           Actual stdout:   '[\'x/test.ifs.\\\\.txt\']\n[\'x/*\\\\"*.txt\']\n[\'x/*\\\\"*.txt\']\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir x
E           touch \
E             'x/test.ifs.\.txt' \
E             'x/test.ifs.*.txt'
E           
E           v='*\*.txt'
E           argv.py x/$v
E           
E           v="\\" u='*.txt'
E           argv.py x/*$v$u
E           
E           v="\\" u="*.txt"
E           argv.py x/*$v*.txt
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[glob.test.sh::shopt -u globskipdots shows . and ..[L402]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b78f0>
test_file = 'glob.test.sh'
test_case = TestCase(name='shopt -u globskipdots shows . and ..', script='case $SH in dash|ash|mksh) exit ;; esac\n\nshopt -u glob...=None), Assertion(type='stdout', value='', shells=['dash', 'ash', 'mksh'], variant='N-I')], line_number=402, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -u globskipdots shows . and .. (line 402)
E           
E           stdout mismatch:
E             expected: 'hi . ..'
E             actual:   'hi .*'
E           
E           Expected stdout: 'hi . ..'
E           Actual stdout:   'hi .*\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|ash|mksh) exit ;; esac
E           
E           shopt -u globskipdots
E           echo hi .*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::Don't glob flags on file system with GLOBIGNORE[L5]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b79b0>
test_file = 'globignore.test.sh'
test_case = TestCase(name="Don't glob flags on file system with GLOBIGNORE", script='touch _tmp/-n _tmp/zzzzz\ncd _tmp\nGLOBIGNORE...mksh', 'ash'], variant='N-I'), Assertion(type='status', value=0, shells=None, variant=None)], line_number=5, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Don't glob flags on file system with GLOBIGNORE (line 5)
E           
E           stdout mismatch:
E             expected: '-* hello zzzz?'
E             actual:   'hello zzzzz'
E           
E           Expected stdout: '-* hello zzzz?'
E           Actual stdout:   'hello zzzzz'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/-n _tmp/zzzzz
E           cd _tmp
E           GLOBIGNORE=-*:zzzzz  # colon-separated pattern list
E           echo -* hello zzzz?
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore *.txt[L16]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7a70>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore *.txt', script='touch one.md one.txt\nmkdir -p foo\ntouch foo/{two.md,two.txt}\nGLOBIGNORE=*.txt...Assertion(type='stdout', value='one.md foo/two.md foo/two.txt', shells=None, variant=None)], line_number=16, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore *.txt (line 16)
E           
E           stdout mismatch:
E             expected: 'one.md foo/two.md foo/two.txt'
E             actual:   'one.md one.txt foo/two.md foo/two.txt'
E           
E           Expected stdout: 'one.md foo/two.md foo/two.txt'
E           Actual stdout:   'one.md one.txt foo/two.md foo/two.txt\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch one.md one.txt
E           mkdir -p foo
E           touch foo/{two.md,two.txt}
E           GLOBIGNORE=*.txt
E           echo *.* foo/*.*
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore ?.txt[L26]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7b30>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore ?.txt', script='touch {1,10}.txt\nmkdir -p foo\ntouch foo/{2,20}.txt\nGLOBIGNORE=?.txt\necho *.*...=[Assertion(type='stdout', value='10.txt foo/2.txt foo/20.txt', shells=None, variant=None)], line_number=26, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore ?.txt (line 26)
E           
E           stdout mismatch:
E             expected: '10.txt foo/2.txt foo/20.txt'
E             actual:   '1.txt 10.txt foo/2.txt foo/20.txt'
E           
E           Expected stdout: '10.txt foo/2.txt foo/20.txt'
E           Actual stdout:   '1.txt 10.txt foo/2.txt foo/20.txt\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch {1,10}.txt
E           mkdir -p foo
E           touch foo/{2,20}.txt
E           GLOBIGNORE=?.txt
E           echo *.* foo/*.*
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore *.o:*.h[L36]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7bf0>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore *.o:*.h', script='touch {hello.c,hello.h,hello.o,hello}\nGLOBIGNORE=*.o:*.h\necho hello*', assertions=[Assertion(type='stdout', value='hello hello.c', shells=None, variant=None)], line_number=36, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore *.o:*.h (line 36)
E           
E           stdout mismatch:
E             expected: 'hello hello.c'
E             actual:   'hello hello.c hello.h hello.o'
E           
E           Expected stdout: 'hello hello.c'
E           Actual stdout:   'hello hello.c hello.h hello.o\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch {hello.c,hello.h,hello.o,hello}
E           GLOBIGNORE=*.o:*.h
E           echo hello*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore single file src/__main__.py[L44]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7cb0>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore single file src/__main__.py', script="mkdir src\ntouch src/{__init__.py,__main__.py}\nGLOBIGNORE..., assertions=[Assertion(type='stdout', value='src/__main__.py', shells=None, variant=None)], line_number=44, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore single file src/__main__.py (line 44)
E           
E           stdout mismatch:
E             expected: 'src/__main__.py'
E             actual:   'src/__init__.py src/__main__.py'
E           
E           Expected stdout: 'src/__main__.py'
E           Actual stdout:   'src/__init__.py src/__main__.py\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir src
E           touch src/{__init__.py,__main__.py}
E           GLOBIGNORE='src/__init__.py'
E           echo src/*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore dirs dist/*:node_modules/*[L53]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7d70>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore dirs dist/*:node_modules/*', script='mkdir {src,compose,dist,node_modules}\ntouch src/{a.js,b.js.../base.compose.yaml compose/dev.compose.yaml src/a.js src/b.js', shells=None, variant=None)], line_number=53, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore dirs dist/*:node_modules/* (line 53)
E           
E           stdout mismatch:
E             expected: 'compose/base.compose.yaml compose/dev.compose.yaml src/a.js src/b.js'
E             actual:   'compose/base.compose.yaml compose/dev.compose.yaml dist/index.js node_modules/package.js src/a.js src/b.js'
E           
E           Expected stdout: 'compose/base.compose.yaml compose/dev.compose.yaml src/a.js src/b.js'
E           Actual stdout:   'compose/base.compose.yaml compose/dev.compose.yaml dist/index.js node_modules/package.js src/a.js src/b.js\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir {src,compose,dist,node_modules}
E           touch src/{a.js,b.js}
E           touch compose/{base.compose.yaml,dev.compose.yaml}
E           touch dist/index.js
E           touch node_modules/package.js
E           GLOBIGNORE=dist/*:node_modules/*
E           echo */*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::find files in subdirectory but not the ignored pattern[L65]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7e30>
test_file = 'globignore.test.sh'
test_case = TestCase(name='find files in subdirectory but not the ignored pattern', script='mkdir {dir1,dir2}\ntouch dir1/{a.txt,i...rtions=[Assertion(type='stdout', value='dir1/a.txt dir2/a.txt', shells=None, variant=None)], line_number=65, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: find files in subdirectory but not the ignored pattern (line 65)
E           
E           stdout mismatch:
E             expected: 'dir1/a.txt dir2/a.txt'
E             actual:   'dir1/a.txt dir1/ignore.txt dir2/a.txt dir2/ignore.txt'
E           
E           Expected stdout: 'dir1/a.txt dir2/a.txt'
E           Actual stdout:   'dir1/a.txt dir1/ignore.txt dir2/a.txt dir2/ignore.txt\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir {dir1,dir2}
E           touch dir1/{a.txt,ignore.txt}
E           touch dir2/{a.txt,ignore.txt}
E           GLOBIGNORE=*/ignore*
E           echo */*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore globs with char patterns like [!ab][L75]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7ef0>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore globs with char patterns like [!ab]', script='rm -rf _tmp\ntouch {a,b,c,d,A,B,C,D}\nGLOBIGNORE=*...=[Assertion(type='stdout', value='A B C D c d\nD a b c d\na b', shells=None, variant=None)], line_number=75, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore globs with char patterns like [!ab] (line 75)
E           
E           stdout mismatch:
E             expected: 'A B C D c d\nD a b c d\na b'
E             actual:   'A B C D _keep a b c d\nA B C D _keep a b c d\nA B C D _keep a b c d'
E           
E           Expected stdout: 'A B C D c d\nD a b c d\na b'
E           Actual stdout:   'A B C D _keep a b c d\nA B C D _keep a b c d\nA B C D _keep a b c d\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -rf _tmp
E           touch {a,b,c,d,A,B,C,D}
E           GLOBIGNORE=*[ab]*
E           echo *
E           GLOBIGNORE=*[ABC]*
E           echo *
E           GLOBIGNORE=*[!ab]*
E           echo *
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore globs with char classes like [[:alnum:]][L91]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092b7fb0>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore globs with char classes like [[:alnum:]]', script="touch {_testing.py,pyproject.toml,20231114.lo...log _testing.py pyproject.toml\nhas space.docx pyproject.toml', shells=None, variant=None)], line_number=91, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore globs with char classes like [[:alnum:]] (line 91)
E           
E           stdout mismatch:
E             expected: '.env _testing.py\n20231114.log has space.docx pyproject.toml\n.env 20231114.log _testing.py pyproject.toml\nhas space.docx pyproject.toml'
E             actual:   '20231114.log _testing.py has space.docx pyproject.toml\n20231114.log _testing.py has space.docx pyproject.toml\n20231114.log _testing.py has space.docx pyproject.toml\n20231114.log _testing.py has space.docx pyproject.toml'
E           
E           Expected stdout: '.env _testing.py\n20231114.log has space.docx pyproject.toml\n.env 20231114.log _testing.py pyproject.toml\nhas space.docx pyproject.toml'
E           Actual stdout:   '20231114.log _testing.py has space.docx pyproject.toml\n20231114.log _testing.py has space.docx pyproject.toml\n20231114.log _testing.py has space.docx pyproject.toml\n20231114.log _testing.py has space.docx pyproject.toml\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch {_testing.py,pyproject.toml,20231114.log,.env}
E           touch 'has space.docx'
E           GLOBIGNORE=[[:alnum:]]*
E           echo *.*
E           GLOBIGNORE=[![:alnum:]]*
E           echo *.*
E           GLOBIGNORE=*[[:space:]]*
E           echo *.*
E           GLOBIGNORE=[[:digit:]_.]*
E           echo *.*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore *[L109]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dc0b0>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore *', script='# This pattern appears in public repositories\ntouch {1.txt,2.log,3.md}\nGLOBIGNORE=*\necho *', assertions=[Assertion(type='stdout', value='*', shells=None, variant=None)], line_number=109, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore * (line 109)
E           
E           stdout mismatch:
E             expected: '*'
E             actual:   '1.txt 2.log 3.md _keep'
E           
E           Expected stdout: '*'
E           Actual stdout:   '1.txt 2.log 3.md _keep\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This pattern appears in public repositories
E           touch {1.txt,2.log,3.md}
E           GLOBIGNORE=*
E           echo *
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::treat escaped patterns literally[L118]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dc170>
test_file = 'globignore.test.sh'
test_case = TestCase(name='treat escaped patterns literally', script='touch {escape-10.txt,escape*.txt}\nGLOBIGNORE="escape\\*.txt...', assertions=[Assertion(type='stdout', value='escape-10.txt', shells=None, variant=None)], line_number=118, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: treat escaped patterns literally (line 118)
E           
E           stdout mismatch:
E             expected: 'escape-10.txt'
E             actual:   'escape*.txt escape-10.txt'
E           
E           Expected stdout: 'escape-10.txt'
E           Actual stdout:   'escape*.txt escape-10.txt\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch {escape-10.txt,escape*.txt}
E           GLOBIGNORE="escape\*.txt"
E           echo *.*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::resetting globignore reverts to default behaviour[L126]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dc230>
test_file = 'globignore.test.sh'
test_case = TestCase(name='resetting globignore reverts to default behaviour', script='touch reset.txt\nGLOBIGNORE=*.txt\necho *.*..., assertions=[Assertion(type='stdout', value='*.*\nreset.txt', shells=None, variant=None)], line_number=126, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: resetting globignore reverts to default behaviour (line 126)
E           
E           stdout mismatch:
E             expected: '*.*\nreset.txt'
E             actual:   'reset.txt\nreset.txt'
E           
E           Expected stdout: '*.*\nreset.txt'
E           Actual stdout:   'reset.txt\nreset.txt\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch reset.txt
E           GLOBIGNORE=*.txt
E           echo *.*
E           GLOBIGNORE=
E           echo *.*
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[globignore.test.sh::Ignore .:..[L137]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dc2f0>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Ignore .:..', script='# globskipdots is enabled by default in bash >=5.2\n# for bash <5.2 this pattern ... assertions=[Assertion(type='stdout', value='.env\n. .. .env', shells=None, variant=None)], line_number=137, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Ignore .:.. (line 137)
E           
E           stdout mismatch:
E             expected: '.env\n. .. .env'
E             actual:   '.env\n.env'
E           
E           Expected stdout: '.env\n. .. .env'
E           Actual stdout:   '.env\n.env\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # globskipdots is enabled by default in bash >=5.2
E           # for bash <5.2 this pattern is a common way to match dotfiles but not . or ..
E           shopt -u globskipdots
E           touch .env
E           GLOBIGNORE=.:..
E           echo .*
E           GLOBIGNORE=
E           echo .* | sort
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::Quoting GLOBIGNORE[L151]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dc3b0>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Quoting GLOBIGNORE', script='# each style of "ignore everything" spotted in a public repo\ntouch image....o *', assertions=[Assertion(type='stdout', value='*\n*\n*\n*', shells=None, variant=None)], line_number=151, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quoting GLOBIGNORE (line 151)
E           
E           stdout mismatch:
E             expected: '*\n*\n*\n*'
E             actual:   '_keep image.jpeg\n_keep image.jpeg\n_keep image.jpeg\n_keep image.jpeg'
E           
E           Expected stdout: '*\n*\n*\n*'
E           Actual stdout:   '_keep image.jpeg\n_keep image.jpeg\n_keep image.jpeg\n_keep image.jpeg\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # each style of "ignore everything" spotted in a public repo
E           touch image.jpeg
E           GLOBIGNORE=*
E           echo *
E           GLOBIGNORE='*'
E           echo *
E           GLOBIGNORE="*"
E           echo *
E           GLOBIGNORE=\*
E           echo *
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::When GLOBIGNORE is set, glob may become empty (nullglob too)[L183]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dc530>
test_file = 'globignore.test.sh'
test_case = TestCase(name='When GLOBIGNORE is set, glob may become empty (nullglob too)', script='touch -- foo.txt -foo.txt\n\nech...sertion(type='stdout', value='-foo.txt foo.txt\n*t\nnullglob', shells=None, variant=None)], line_number=183, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: When GLOBIGNORE is set, glob may become empty (nullglob too) (line 183)
E           
E           stdout mismatch:
E             expected: '-foo.txt foo.txt\n*t\nnullglob'
E             actual:   '-foo.txt foo.txt\n-foo.txt foo.txt\nnullglob -foo.txt foo.txt'
E           
E           Expected stdout: '-foo.txt foo.txt\n*t\nnullglob'
E           Actual stdout:   '-foo.txt foo.txt\n-foo.txt foo.txt\nnullglob -foo.txt foo.txt\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch -- foo.txt -foo.txt
E           
E           echo *t
E           
E           GLOBIGNORE=*.txt
E           echo *t
E           
E           shopt -s nullglob
E           echo nullglob *t
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[globignore.test.sh::Extended glob expansion combined with GLOBIGNORE[L219]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dc6b0>
test_file = 'globignore.test.sh'
test_case = TestCase(name='Extended glob expansion combined with GLOBIGNORE', script='shopt -s extglob\n\ntouch foo.cc foo.h bar.c...ype='stdout', value='bar.cc bar.h foo.cc foo.h\nbar.cc bar.h', shells=None, variant=None)], line_number=219, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Extended glob expansion combined with GLOBIGNORE (line 219)
E           
E           stdout mismatch:
E             expected: 'bar.cc bar.h foo.cc foo.h\nbar.cc bar.h'
E             actual:   'bar.cc bar.h foo.cc foo.h\nbar.cc bar.h foo.cc foo.h'
E           
E           Expected stdout: 'bar.cc bar.h foo.cc foo.h\nbar.cc bar.h'
E           Actual stdout:   'bar.cc bar.h foo.cc foo.h\nbar.cc bar.h foo.cc foo.h\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           
E           touch foo.cc foo.h bar.cc bar.h 
E           echo @(*.cc|*.h)
E           GLOBIGNORE=foo.*
E           echo @(*.cc|*.h)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Here redirect with explicit descriptor[L22]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dccb0>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Here redirect with explicit descriptor', script="# A space between 0 and <<EOF causes it to pass '0' as...F\none\nEOF", assertions=[Assertion(type='stdout', value='one', shells=None, variant=None)], line_number=22, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Here redirect with explicit descriptor (line 22)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'cat: 0: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # A space between 0 and <<EOF causes it to pass '0' as an arg to cat.
E           cat 0<<EOF
E           one
E           EOF
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Here doc from another input file descriptor[L29]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dcd70>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Here doc from another input file descriptor', script='# NOTE: OSH fails on descriptor 9, but not descri...ns=[Assertion(type='stdout', value='8: here doc on descriptor', shells=None, variant=None)], line_number=29, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Here doc from another input file descriptor (line 29)
E           
E           stdout mismatch:
E             expected: '8: here doc on descriptor'
E             actual:   ''
E           
E           Expected stdout: '8: here doc on descriptor'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: OSH fails on descriptor 9, but not descriptor 8?  Is this because of
E           # the Python VM?  How  to inspect state?
E           read_from_fd.py 8  8<<EOF
E           here doc on descriptor
E           EOF
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Multiple here docs with different descriptors[L37]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dce30>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Multiple here docs with different descriptors', script='read_from_fd.py 0 3 <<EOF 3<<EOF3\nfd0\nEOF\nfd...', assertions=[Assertion(type='stdout', value='0: fd0\n3: fd3', shells=None, variant=None)], line_number=37, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple here docs with different descriptors (line 37)
E           
E           Execution error: 'HereDocNode' object has no attribute 'parts'
E           
E           
E           Script:
E           ---
E           read_from_fd.py 0 3 <<EOF 3<<EOF3
E           fd0
E           EOF
E           fd3
E           EOF3
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Two here docs -- first is ignored; second ones wins![L144]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dd5b0>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Two here docs -- first is ignored; second ones wins!', script='<<EOF1 cat <<EOF2\nhello\nEOF1\nthere\nEOF2', assertions=[Assertion(type='stdout', value='there', shells=None, variant=None)], line_number=144, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Two here docs -- first is ignored; second ones wins! (line 144)
E           
E           Execution error: 'HereDocNode' object has no attribute 'parts'
E           
E           
E           Script:
E           ---
E           <<EOF1 cat <<EOF2
E           hello
E           EOF1
E           there
E           EOF2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Here doc with line continuation, then pipe.  Syntax error.[L152]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dd670>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Here doc with line continuation, then pipe.  Syntax error.', script='cat <<EOF \\\n1\n2\n3\nEOF\n| tac'...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=152, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Here doc with line continuation, then pipe.  Syntax error. (line 152)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 6, column 1
E           
E           
E           Script:
E           ---
E           cat <<EOF \
E           1
E           2
E           3
E           EOF
E           | tac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Compound command here doc[L195]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dd970>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Compound command here doc', script='while read line; do\n  echo X $line\ndone <<EOF\n1\n2\n3\nEOF', assertions=[Assertion(type='stdout', value='X 1\nX 2\nX 3', shells=None, variant=None)], line_number=195, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Compound command here doc (line 195)
E           
E           stdout mismatch:
E             expected: 'X 1\nX 2\nX 3'
E             actual:   ''
E           
E           Expected stdout: 'X 1\nX 2\nX 3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           while read line; do
E             echo X $line
E           done <<EOF
E           1
E           2
E           3
E           EOF
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Two compound commands with two here docs[L277]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092ddd30>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Two compound commands with two here docs', script='while read line; do echo X $line; done <<EOF; echo =...ions=[Assertion(type='stdout', value='X 1\nX 2\n==\nY 3\nY 4', shells=None, variant=None)], line_number=277, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Two compound commands with two here docs (line 277)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 5, column 1
E           
E           
E           Script:
E           ---
E           while read line; do echo X $line; done <<EOF; echo ==;  while read line; do echo Y $line; done <<EOF2
E           1
E           2
E           EOF
E           3
E           4
E           EOF2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Function def and execution with here doc[L293]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dddf0>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Function def and execution with here doc', script='fun() { cat; } <<EOF; echo before; fun; echo after\n...ertions=[Assertion(type='stdout', value='before\n1\n2\nafter', shells=None, variant=None)], line_number=293, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Function def and execution with here doc (line 293)
E           
E           stdout mismatch:
E             expected: 'before\n1\n2\nafter'
E             actual:   'before\nafter'
E           
E           Expected stdout: 'before\n1\n2\nafter'
E           Actual stdout:   'before\nafter\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           fun() { cat; } <<EOF; echo before; fun; echo after
E           1
E           2
E           EOF
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Multiple here docs in pipeline[L374]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092de330>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Multiple here docs in pipeline', script='case $SH in *osh) exit ;; esac\n\n# The second instance reads ...tions=[Assertion(type='stdout', value='0: 3: fd3\n5: fd5\nok', shells=None, variant=None)], line_number=374, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple here docs in pipeline (line 374)
E           
E           stdout mismatch:
E             expected: '0: 3: fd3\n5: fd5\nok'
E             actual:   'fd5\nok'
E           
E           Expected stdout: '0: 3: fd3\n5: fd5\nok'
E           Actual stdout:   'fd5\nok\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in *osh) exit ;; esac
E           
E           # The second instance reads its stdin from the pipe, and fd 5 from a here doc.
E           read_from_fd.py 3 3<<EOF3 | read_from_fd.py 0 5 5<<EOF5
E           fd3
E           EOF3
E           fd5
E           EOF5
E           
E           echo ok
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Multiple here docs in pipeline on multiple lines[L392]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092de3f0>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Multiple here docs in pipeline on multiple lines', script='case $SH in *osh) exit ;; esac\n\n# SKIPPED:...tions=[Assertion(type='stdout', value='0: 3: fd3\n5: fd5\nok', shells=None, variant=None)], line_number=392, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple here docs in pipeline on multiple lines (line 392)
E           
E           stdout mismatch:
E             expected: '0: 3: fd3\n5: fd5\nok'
E             actual:   'fd5\nok'
E           
E           Expected stdout: '0: 3: fd3\n5: fd5\nok'
E           Actual stdout:   'fd5\nok\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in *osh) exit ;; esac
E           
E           # SKIPPED: hangs with osh on Debian
E           # The second instance reads its stdin from the pipe, and fd 5 from a here doc.
E           read_from_fd.py 3 3<<EOF3 |
E           fd3
E           EOF3
E           read_from_fd.py 0 5 5<<EOF5
E           fd5
E           EOF5
E           
E           echo ok
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[here-doc.test.sh::Here doc and backslash double quote[L412]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092de4b0>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Here doc and backslash double quote', script='cat <<EOF\na \\"quote\\"\nEOF', assertions=[Assertion(type='stdout', value='a \\"quote\\"', shells=None, variant=None)], line_number=412, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Here doc and backslash double quote (line 412)
E           
E           stdout mismatch:
E             expected: 'a \\"quote\\"'
E             actual:   'a "quote"'
E           
E           Expected stdout: 'a \\"quote\\"'
E           Actual stdout:   'a "quote"\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cat <<EOF
E           a \"quote\"
E           EOF
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[here-doc.test.sh::Here doc escapes[L421]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092de570>
test_file = 'here-doc.test.sh'
test_case = TestCase(name='Here doc escapes', script='# these are the chars from _DQ_ESCAPED_CHAR\ncat <<EOF\n\\\\ \\" \\$ \\`\nEOF', assertions=[Assertion(type='stdout', value='\\ \\" $ `', shells=None, variant=None)], line_number=421, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Here doc escapes (line 421)
E           
E           stdout mismatch:
E             expected: '\\ \\" $ `'
E             actual:   '\\ " $ `'
E           
E           Expected stdout: '\\ \\" $ `'
E           Actual stdout:   '\\ " $ `\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # these are the chars from _DQ_ESCAPED_CHAR
E           cat <<EOF
E           \\ \" \$ \`
E           EOF
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[introspect.test.sh::${FUNCNAME[@]} array[L28]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092de9f0>
test_file = 'introspect.test.sh'
test_case = TestCase(name='${FUNCNAME[@]} array', script='g() {\n  argv.py "${FUNCNAME[@]}"\n}\nf() {\n  argv.py "${FUNCNAME[@]}"\...ons=[Assertion(type='stdout', value="['f']\n['g', 'f']\n['f']", shells=None, variant=None)], line_number=28, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${FUNCNAME[@]} array (line 28)
E           
E           stdout mismatch:
E             expected: "['f']\n['g', 'f']\n['f']"
E             actual:   '[]\n[]\n[]'
E           
E           Expected stdout: "['f']\n['g', 'f']\n['f']"
E           Actual stdout:   '[]\n[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           g() {
E             argv.py "${FUNCNAME[@]}"
E           }
E           f() {
E             argv.py "${FUNCNAME[@]}"
E             g
E             argv.py "${FUNCNAME[@]}"
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[introspect.test.sh::FUNCNAME with source (scalar or array)[L44]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092deab0>
test_file = 'introspect.test.sh'
test_case = TestCase(name='FUNCNAME with source (scalar or array)', script='cd $REPO_ROOT\n\n# Comments on bash quirk:\n# https://...\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']", shells=['bash'], variant='BUG')], line_number=44, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: FUNCNAME with source (scalar or array) (line 44)
E           
E           stdout mismatch:
E             expected: "['  @', 'source', 'f', 'g']\n['  0', 'source']\n['${}', 'source']\n['  $', 'source']\n-----\n['  @']\n['  0', '']\n['${}', '']\n['  $', '']\n-----\n[]\n-----\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']"
E             actual:   "-----\n-----\n[]\n-----\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']"
E           
E           Expected stdout: "['  @', 'source', 'f', 'g']\n['  0', 'source']\n['${}', 'source']\n['  $', 'source']\n-----\n['  @']\n['  0', '']\n['${}', '']\n['  $', '']\n-----\n[]\n-----\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']"
E           Actual stdout:   "-----\n-----\n[]\n-----\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: source: spec/testdata/echo-funcname.sh: No such file or directory\nbash: source: spec/testdata/echo-funcname.sh: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           
E           # Comments on bash quirk:
E           # https://github.com/oilshell/oil/pull/656#issuecomment-599162211
E           
E           f() {
E             . spec/testdata/echo-funcname.sh
E           }
E           g() {
E             f
E           }
E           
E           g
E           echo -----
E           
E           . spec/testdata/echo-funcname.sh
E           echo -----
E           
E           argv.py "${FUNCNAME[@]}"
E           
E           # Show bash inconsistency.  FUNCNAME doesn't behave like a normal array.
E           case $SH in 
E             (bash)
E               echo -----
E               a=('A')
E               argv.py '  @' "${a[@]}"
E               argv.py '  0' "${a[0]}"
E               argv.py '${}' "${a}"
E               argv.py '  $' "$a"
E               ;;
E           esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[introspect.test.sh::$((BASH_LINENO)) (scalar form in arith)[L150]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dedb0>
test_file = 'introspect.test.sh'
test_case = TestCase(name='$((BASH_LINENO)) (scalar form in arith)', script='check() {\n  echo $((BASH_LINENO))\n}\ncheck', assertions=[Assertion(type='stdout', value='4', shells=None, variant=None)], line_number=150, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $((BASH_LINENO)) (scalar form in arith) (line 150)
E           
E           stdout mismatch:
E             expected: '4'
E             actual:   '0'
E           
E           Expected stdout: '4'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           check() {
E             echo $((BASH_LINENO))
E           }
E           check
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[introspect.test.sh::${BASH_SOURCE[@]} with source and function name[L157]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dee70>
test_file = 'introspect.test.sh'
test_case = TestCase(name='${BASH_SOURCE[@]} with source and function name', script='cd $REPO_ROOT\n\nargv.py "${BASH_SOURCE[@]}"\...-source-simple.sh']\n['spec/testdata/bash-source-simple.sh']", shells=None, variant=None)], line_number=157, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${BASH_SOURCE[@]} with source and function name (line 157)
E           
E           stdout mismatch:
E             expected: "[]\n['spec/testdata/bash-source-simple.sh']\n['spec/testdata/bash-source-simple.sh']"
E             actual:   '[]'
E           
E           Expected stdout: "[]\n['spec/testdata/bash-source-simple.sh']\n['spec/testdata/bash-source-simple.sh']"
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   'bash: source: spec/testdata/bash-source-simple.sh: No such file or directory\nbash: f: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           
E           argv.py "${BASH_SOURCE[@]}"
E           source spec/testdata/bash-source-simple.sh
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[introspect.test.sh::${BASH_LINENO[@]} is a stack of line numbers for function calls[L172]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092def30>
test_file = 'introspect.test.sh'
test_case = TestCase(name='${BASH_LINENO[@]} is a stack of line numbers for function calls', script='# note: it\'s CALLS, not DEFI...ue="[]\n['begin F', '11']\n['G', '7', '11']\n['end F', '11']", shells=None, variant=None)], line_number=172, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${BASH_LINENO[@]} is a stack of line numbers for function calls (line 172)
E           
E           stdout mismatch:
E             expected: "[]\n['begin F', '11']\n['G', '7', '11']\n['end F', '11']"
E             actual:   "[]\n['begin F']\n['G']\n['end F']"
E           
E           Expected stdout: "[]\n['begin F', '11']\n['G', '7', '11']\n['end F', '11']"
E           Actual stdout:   "[]\n['begin F']\n['G']\n['end F']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # note: it's CALLS, not DEFINITIONS.
E           g() {
E             argv.py G "${BASH_LINENO[@]}"
E           }
E           f() {
E             argv.py 'begin F' "${BASH_LINENO[@]}"
E             g  # line 7
E             argv.py 'end F' "${BASH_LINENO[@]}"
E           }
E           argv.py ${BASH_LINENO[@]}
E           f  # line 11
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[introspect.test.sh::Locations with temp frame[L191]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092deff0>
test_file = 'introspect.test.sh'
test_case = TestCase(name='Locations with temp frame', script='cd $REPO_ROOT\n\n$SH spec/testdata/bash-source-pushtemp.sh', assert....sh:f:19\nSTACK:spec/testdata/bash-source-pushtemp.sh:main:0', shells=None, variant=None)], line_number=191, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Locations with temp frame (line 191)
E           
E           stdout mismatch:
E             expected: 'F\nG\nSTACK:spec/testdata/bash-source-pushtemp.sh:g:3\nSTACK:spec/testdata/bash-source-pushtemp.sh:f:19\nSTACK:spec/testdata/bash-source-pushtemp.sh:main:0'
E             actual:   ''
E           
E           Expected stdout: 'F\nG\nSTACK:spec/testdata/bash-source-pushtemp.sh:g:3\nSTACK:spec/testdata/bash-source-pushtemp.sh:f:19\nSTACK:spec/testdata/bash-source-pushtemp.sh:main:0'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: spec/testdata/bash-source-pushtemp.sh: No such file or directory\n'
E           Expected status: None
E           Actual status:   127
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           
E           $SH spec/testdata/bash-source-pushtemp.sh
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[introspect.test.sh::Locations when sourcing[L206]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092df0b0>
test_file = 'introspect.test.sh'
test_case = TestCase(name='Locations when sourcing', script="cd $REPO_ROOT\n\n# like above test case, but we source\n\n# bash loca...h:f:19\nSTACK:spec/testdata/bash-source-pushtemp.sh:source:2', shells=None, variant=None)], line_number=206, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Locations when sourcing (line 206)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 5
E           
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           
E           # like above test case, but we source
E           
E           # bash location doesn't make sense:
E           # - It says 'source' happens at line 1 of bash-source-pushtemp.  Well I think
E           # - It really happens at line 2 of '-c' !    I guess that's to line up
E           #   with the 'main' frame
E           
E           $SH -c 'true;
E           source spec/testdata/bash-source-pushtemp.sh'
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[loop.test.sh::implicit for loop[L4]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092df2f0>
test_file = 'loop.test.sh'
test_case = TestCase(name='implicit for loop', script='# This is like "for i in $@".\nfun() {\n  for i; do\n    echo $i\n  done\n ...ssertions=[Assertion(type='stdout', value='1\n2\n3\nfinished=3', shells=None, variant=None)], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: implicit for loop (line 4)
E           
E           stdout mismatch:
E             expected: '1\n2\n3\nfinished=3'
E             actual:   'finished='
E           
E           Expected stdout: '1\n2\n3\nfinished=3'
E           Actual stdout:   'finished=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is like "for i in $@".
E           fun() {
E             for i; do
E               echo $i
E             done
E             echo "finished=$i"
E           }
E           fun 1 2 3
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::the word 'in' can be the loop variable[L39]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092df530>
test_file = 'loop.test.sh'
test_case = TestCase(name="the word 'in' can be the loop variable", script='for in in a b c; do\n  echo $in\ndone\necho finished=$...sertions=[Assertion(type='stdout', value='a\nb\nc\nfinished=c', shells=None, variant=None)], line_number=39, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: the word 'in' can be the loop variable (line 39)
E           
E           Execution error: Expected variable name after 'for' at line 1, column 5
E           
E           
E           Script:
E           ---
E           for in in a b c; do
E             echo $in
E           done
E           echo finished=$in
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::while in pipe with subshell[L145]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dfb30>
test_file = 'loop.test.sh'
test_case = TestCase(name='while in pipe with subshell', script='i=0\nseq 3 | ( while read foo; do\n  i=$((i+1))\n  #echo $i\ndone\necho $i )', assertions=[Assertion(type='stdout', value='3', shells=None, variant=None)], line_number=145, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: while in pipe with subshell (line 145)
E           
E           stdout mismatch:
E             expected: '3'
E             actual:   '0'
E           
E           Expected stdout: '3'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=0
E           seq 3 | ( while read foo; do
E             i=$((i+1))
E             #echo $i
E           done
E           echo $i )
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[loop.test.sh::continue in subshell[L179]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dfd70>
test_file = 'loop.test.sh'
test_case = TestCase(name='continue in subshell', script='for i in $(seq 2); do\n  echo "> $i"\n  ( if true; then continue; fi; ec...s=0\n. 1\n> 2\nShould not print\nsubshell status=0\n. 2', shells=['mksh'], variant='BUG')], line_number=179, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: continue in subshell (line 179)
E           
E           stdout mismatch:
E             expected: '> 1\nsubshell status=0\n. 1\n> 2\nsubshell status=0\n. 2'
E             actual:   '> 1\nShould not print\nsubshell status=0\n. 1\n> 2\nShould not print\nsubshell status=0\n. 2'
E           
E           Expected stdout: '> 1\nsubshell status=0\n. 1\n> 2\nsubshell status=0\n. 2'
E           Actual stdout:   '> 1\nShould not print\nsubshell status=0\n. 1\n> 2\nShould not print\nsubshell status=0\n. 2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           for i in $(seq 2); do
E             echo "> $i"
E             ( if true; then continue; fi; echo "Should not print" )
E             echo subshell status=$?
E             echo ". $i"
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::continue in subshell aborts with errexit[L214]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dfe30>
test_file = 'loop.test.sh'
test_case = TestCase(name='continue in subshell aborts with errexit', script='# The other shells don\'t let you recover from this ...mksh'], variant='BUG'), Assertion(type='status', value=0, shells=['mksh'], variant='BUG')], line_number=214, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: continue in subshell aborts with errexit (line 214)
E           
E           stdout mismatch:
E             expected: '> 1\nshould fail after subshell\n. 1\n> 2\nshould fail after subshell\n. 2'
E             actual:   '> 1\nShould not print\nshould fail after subshell\n. 1\n> 2\nShould not print\nshould fail after subshell\n. 2'
E           
E           Expected stdout: '> 1\nshould fail after subshell\n. 1\n> 2\nshould fail after subshell\n. 2'
E           Actual stdout:   '> 1\nShould not print\nshould fail after subshell\n. 1\n> 2\nShould not print\nshould fail after subshell\n. 2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # The other shells don't let you recover from this programming error!
E           set -o errexit
E           for i in $(seq 2); do
E             echo "> $i"
E             ( if true; then continue; fi; echo "Should not print" )
E             echo 'should fail after subshell'
E             echo ". $i"
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::too many args to continue[L273]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1092dffb0>
test_file = 'loop.test.sh'
test_case = TestCase(name='too many args to continue', script='# OSH treats this as a parse error\nfor x in a b c; do\n  echo $x\n...='BUG'), Assertion(type='status', value=0, shells=['dash', 'mksh', 'zsh'], variant='BUG')], line_number=273, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: too many args to continue (line 273)
E           
E           stdout mismatch:
E             expected: 'a\n--'
E             actual:   'a\nb\nc\n--'
E           
E           Expected stdout: 'a\n--'
E           Actual stdout:   'a\nb\nc\n--\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # OSH treats this as a parse error
E           for x in a b c; do
E             echo $x
E             # bash breaks rather than continue or fatal error!!!
E             continue 1 2 3
E           done
E           echo --
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::break in condition of nested loop[L310]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308170>
test_file = 'loop.test.sh'
test_case = TestCase(name='break in condition of nested loop', script='for i in 1 2 3; do\n  echo i=$i\n  while break; do\n    ech...ertions=[Assertion(type='stdout', value='i=1\ni=2\ni=3\ndone', shells=None, variant=None)], line_number=310, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: break in condition of nested loop (line 310)
E           
E           stdout mismatch:
E             expected: 'i=1\ni=2\ni=3\ndone'
E             actual:   'i=1\ndone'
E           
E           Expected stdout: 'i=1\ni=2\ni=3\ndone'
E           Actual stdout:   'i=1\ndone\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           for i in 1 2 3; do
E             echo i=$i
E             while break; do
E               echo x
E             done
E           done
E           echo done
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[loop.test.sh::return within eval[L325]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308230>
test_file = 'loop.test.sh'
test_case = TestCase(name='return within eval', script="f() {\n  echo one\n  eval 'return'\n  echo two\n}\nf", assertions=[Assertion(type='stdout', value='one', shells=None, variant=None)], line_number=325, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: return within eval (line 325)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   'one\ntwo'
E           
E           Expected stdout: 'one'
E           Actual stdout:   'one\ntwo\n'
E           Expected stderr: None
E           Actual stderr:   'bash: eval: return 0\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo one
E             eval 'return'
E             echo two
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::break/continue within eval[L336]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093082f0>
test_file = 'loop.test.sh'
test_case = TestCase(name='break/continue within eval', script="# NOTE: This changes things\n# set -e\nf() {\n  for i in $(seq 5);...nt=None), Assertion(type='stdout', value='1\n2\n3\n4\n5', shells=['mksh'], variant='BUG')], line_number=336, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: break/continue within eval (line 336)
E           
E           stdout mismatch:
E             expected: '1\n3'
E             actual:   '1\n2\n3\n4\n5\ndone'
E           
E           Expected stdout: '1\n3'
E           Actual stdout:   '1\n2\n3\n4\n5\ndone\n'
E           Expected stderr: None
E           Actual stderr:   'bash: eval: continue 1\nbash: eval: break 1\nbash: eval: return 0\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: This changes things
E           # set -e
E           f() {
E             for i in $(seq 5); do 
E               if test $i = 2; then
E                 eval continue
E               fi
E               if test $i = 4; then
E                 eval break
E               fi
E               echo $i
E             done
E           
E             eval 'return'
E             echo 'done'
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::break/continue within source[L366]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093083b0>
test_file = 'loop.test.sh'
test_case = TestCase(name='break/continue within source', script="# NOTE: This changes things\n# set -e\n\n# Create the test data ...ertion(type='stdout', value='1\n2\n3\n4\n5\ndone', shells=['zsh', 'mksh'], variant='BUG')], line_number=366, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: break/continue within source (line 366)
E           
E           stdout mismatch:
E             expected: '1\n3\ndone'
E             actual:   ''
E           
E           Expected stdout: '1\n3\ndone'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: This changes things
E           # set -e
E           
E           # Create the test data files inline
E           mkdir -p /tmp/testdata
E           echo 'continue' > /tmp/testdata/continue.sh
E           echo 'break' > /tmp/testdata/break.sh
E           echo 'return' > /tmp/testdata/return.sh
E           
E           f() {
E             for i in $(seq 5); do
E               if test $i = 2; then
E                 . /tmp/testdata/continue.sh
E               fi
E               if test $i = 4; then
E                 . /tmp/testdata/break.sh
E               fi
E               echo $i
E             done
E           
E             # Return is different!
E             . /tmp/testdata/return.sh
E             echo done
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::top-level break/continue/return (without strict_control_flow)[L406]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308470>
test_file = 'loop.test.sh'
test_case = TestCase(name='top-level break/continue/return (without strict_control_flow)', script='# Test break/continue/return at...ls=None, variant=None), Assertion(type='stdout', value='', shells=['zsh'], variant='BUG')], line_number=406, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: top-level break/continue/return (without strict_control_flow) (line 406)
E           
E           Execution error: return 0
E           
E           
E           Script:
E           ---
E           # Test break/continue/return at top level (outside of loops/functions)
E           # In bash, break/continue print warnings but succeed with exit 0
E           # return at top level fails with exit 1
E           break; echo break=$?
E           continue; echo continue=$?
E           return; echo return=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::$b break, $c continue, $r return, $e exit[L483]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093086b0>
test_file = 'loop.test.sh'
test_case = TestCase(name='$b break, $c continue, $r return, $e exit', script="# hm would it be saner to make FATAL builtins calle...reak\n1\n- continue\n1\n3\n- return\n1\n2\nstatus=99\n- exit', shells=None, variant=None)], line_number=483, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $b break, $c continue, $r return, $e exit (line 483)
E           
E           stdout mismatch:
E             expected: '- break\n1\n- continue\n1\n3\n- return\n1\n2\nstatus=99\n- exit'
E             actual:   '- break\n1\n- continue\n1\n3\nstatus=99\n- exit'
E           
E           Expected stdout: '- break\n1\n- continue\n1\n3\n- return\n1\n2\nstatus=99\n- exit'
E           Actual stdout:   '- break\n1\n- continue\n1\n3\nstatus=99\n- exit\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 5
E           Actual status:   5
E           
E           Script:
E           ---
E           # hm would it be saner to make FATAL builtins called break/continue/etc.?
E           # On the other hand, this spits out errors loudly.
E           
E           echo '- break'
E           b=break
E           for i in 1 2 3; do
E             echo $i
E             $b
E           done
E           
E           echo '- continue'
E           c='continue'
E           for i in 1 2 3; do
E             if test $i = 2; then
E               $c
E             fi
E             echo $i
E           done
E           
E           r='return'
E           f() {
E             echo '- return'
E             for i in 1 2 3; do
E               echo $i
E               if test $i = 2; then
E                 $r 99
E               fi
E             done
E           }
E           f
E           echo status=$?
E           
E           echo '- exit'
E           e='exit'
E           $e 5
E           echo 'not executed'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::\\break \\continue \\return \\exit[L536]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308770>
test_file = 'loop.test.sh'
test_case = TestCase(name='\\break \\continue \\return \\exit', script="echo '- break'\nfor i in 1 2 3; do\n  echo $i\n  \\break\n...reak\n1\n- continue\n1\n3\n- return\n1\n2\nstatus=99\n- exit', shells=None, variant=None)], line_number=536, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \break \continue \return \exit (line 536)
E           
E           stdout mismatch:
E             expected: '- break\n1\n- continue\n1\n3\n- return\n1\n2\nstatus=99\n- exit'
E             actual:   '- break\n1\n- continue\n1\n3\nstatus=99\n- exit'
E           
E           Expected stdout: '- break\n1\n- continue\n1\n3\n- return\n1\n2\nstatus=99\n- exit'
E           Actual stdout:   '- break\n1\n- continue\n1\n3\nstatus=99\n- exit\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 5
E           Actual status:   5
E           
E           Script:
E           ---
E           echo '- break'
E           for i in 1 2 3; do
E             echo $i
E             \break
E           done
E           
E           echo '- continue'
E           for i in 1 2 3; do
E             if test $i = 2; then
E               \continue
E             fi
E             echo $i
E           done
E           
E           f() {
E             echo '- return'
E             for i in 1 2 3; do
E               echo $i
E               if test $i = 2; then
E                 \return 99
E               fi
E             done
E           }
E           f
E           echo status=$?
E           
E           echo '- exit'
E           \exit 5
E           echo 'not executed'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[loop.test.sh::builtin,command break,continue,return,exit[L582]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308830>
test_file = 'loop.test.sh'
test_case = TestCase(name='builtin,command break,continue,return,exit', script="case $SH in dash|zsh) exit ;; esac\n\necho '- brea...variant='N-I'), Assertion(type='stdout', value='', shells=['dash', 'zsh'], variant='N-I')], line_number=582, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: builtin,command break,continue,return,exit (line 582)
E           
E           stdout mismatch:
E             expected: '- break\n1\n- continue\n1\n3\n- return\n1\n2\nstatus=99\n- exit'
E             actual:   '- break\n1\n- continue\n1\n3\nstatus=99\n- exit'
E           
E           Expected stdout: '- break\n1\n- continue\n1\n3\n- return\n1\n2\nstatus=99\n- exit'
E           Actual stdout:   '- break\n1\n- continue\n1\n3\nstatus=99\n- exit\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 5
E           Actual status:   5
E           
E           Script:
E           ---
E           case $SH in dash|zsh) exit ;; esac
E           
E           echo '- break'
E           for i in 1 2 3; do
E             echo $i
E             builtin break
E           done
E           
E           echo '- continue'
E           for i in 1 2 3; do
E             if test $i = 2; then
E               command continue
E             fi
E             echo $i
E           done
E           
E           f() {
E             echo '- return'
E             for i in 1 2 3; do
E               echo $i
E               if test $i = 2; then
E                 builtin command return 99
E               fi
E             done
E           }
E           f
E           echo status=$?
E           
E           echo '- exit'
E           command builtin exit 5
E           echo 'not executed'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::pass array by reference[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093088f0>
test_file = 'nameref.test.sh'
test_case = TestCase(name='pass array by reference', script='show_value() {\n  local -n array_name=$1\n  local idx=$2\n  echo "${a...lue shadock 2', assertions=[Assertion(type='stdout', value='zo', shells=None, variant=None)], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pass array by reference (line 4)
E           
E           stdout mismatch:
E             expected: 'zo'
E             actual:   ''
E           
E           Expected stdout: 'zo'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_value() {
E             local -n array_name=$1
E             local idx=$2
E             echo "${array_name[$idx]}"
E           }
E           shadock=(ga bu zo meu)
E           show_value shadock 2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::mutate array by reference[L14]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093089b0>
test_file = 'nameref.test.sh'
test_case = TestCase(name='mutate array by reference', script='set1() {\n  local -n array_name=$1\n  local val=$2\n  array_name[1]...k[@]}', assertions=[Assertion(type='stdout', value='a ZZZ c d', shells=None, variant=None)], line_number=14, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mutate array by reference (line 14)
E           
E           stdout mismatch:
E             expected: 'a ZZZ c d'
E             actual:   'a b c d'
E           
E           Expected stdout: 'a ZZZ c d'
E           Actual stdout:   'a b c d\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set1() {
E             local -n array_name=$1
E             local val=$2
E             array_name[1]=$val
E           }
E           shadock=(a b c d)
E           set1 shadock ZZZ
E           echo ${shadock[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::pass assoc array by reference[L27]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308a70>
test_file = 'nameref.test.sh'
test_case = TestCase(name='pass assoc array by reference', script='show_value() {\n  local -n array_name=$1\n  local idx=$2\n  ech...ant=None), Assertion(type='stdout', value='[monday]=eggs', shells=['mksh'], variant='BUG')], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pass assoc array by reference (line 27)
E           
E           stdout mismatch:
E             expected: 'jam'
E             actual:   ''
E           
E           Expected stdout: 'jam'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_value() {
E             local -n array_name=$1
E             local idx=$2
E             echo "${array_name[$idx]}"
E           }
E           days=([monday]=eggs [tuesday]=bread [sunday]=jam)
E           show_value days sunday
E           #  mksh note: it coerces "days" to 0?  Horrible.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::pass local array by reference, relying on DYNAMIC SCOPING[L39]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308b30>
test_file = 'nameref.test.sh'
test_case = TestCase(name='pass local array by reference, relying on DYNAMIC SCOPING', script='show_value() {\n  local -n array_na...'mksh'], variant='BUG'), Assertion(type='status', value=1, shells=['mksh'], variant='BUG')], line_number=39, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pass local array by reference, relying on DYNAMIC SCOPING (line 39)
E           
E           stdout mismatch:
E             expected: 'zo'
E             actual:   ''
E           
E           Expected stdout: 'zo'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_value() {
E             local -n array_name=$1
E             local idx=$2
E             echo "${array_name[$idx]}"
E           }
E           caller() {
E             local shadock=(ga bu zo meu)
E             show_value shadock 2
E           }
E           caller
E           # mksh appears not to have local arrays!
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[nameref.test.sh::flag -n and +n[L56]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308bf0>
test_file = 'nameref.test.sh'
test_case = TestCase(name='flag -n and +n', script='x=foo\n\nref=x\n\necho ref=$ref\n\ntypeset -n ref\necho ref=$ref\n\n# mutate u...ssertion(type='stdout', value='ref=x\nref=foo\nref=bar\nref=x', shells=None, variant=None)], line_number=56, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: flag -n and +n (line 56)
E           
E           stdout mismatch:
E             expected: 'ref=x\nref=foo\nref=bar\nref=x'
E             actual:   'ref=x\nref=x\nref=x\nref=x'
E           
E           Expected stdout: 'ref=x\nref=foo\nref=bar\nref=x'
E           Actual stdout:   'ref=x\nref=x\nref=x\nref=x\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `+n': not a valid identifier\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=foo
E           
E           ref=x
E           
E           echo ref=$ref
E           
E           typeset -n ref
E           echo ref=$ref
E           
E           # mutate underlying var
E           x=bar
E           echo ref=$ref
E           
E           typeset +n ref
E           echo ref=$ref
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::mutating through nameref: ref=[L80]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308cb0>
test_file = 'nameref.test.sh'
test_case = TestCase(name='mutating through nameref: ref=', script="x=XX\ny=YY\n\nref=x\nref=y\necho 1 ref=$ref\n\n# now it's a re...type='stdout', value='1 ref=y\n2 ref=YY\n3 ref=XXXX\n4 y=XXXX', shells=None, variant=None)], line_number=80, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: mutating through nameref: ref= (line 80)
E           
E           stdout mismatch:
E             expected: '1 ref=y\n2 ref=YY\n3 ref=XXXX\n4 y=XXXX'
E             actual:   '1 ref=y\n2 ref=y\n3 ref=XXXX\n4 y=YY'
E           
E           Expected stdout: '1 ref=y\n2 ref=YY\n3 ref=XXXX\n4 y=XXXX'
E           Actual stdout:   '1 ref=y\n2 ref=y\n3 ref=XXXX\n4 y=YY\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=XX
E           y=YY
E           
E           ref=x
E           ref=y
E           echo 1 ref=$ref
E           
E           # now it's a reference
E           typeset -n ref
E           
E           echo 2 ref=$ref  # prints YY
E           
E           ref=XXXX
E           echo 3 ref=$ref  # it actually prints y, which is XXXX
E           
E           # now Y is mutated!
E           echo 4 y=$y
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::flag -n combined ${!ref} -- bash INVERTS[L107]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308d70>
test_file = 'nameref.test.sh'
test_case = TestCase(name='flag -n combined ${!ref} -- bash INVERTS', script='foo=FOO  # should NOT use this\n\nx=foo\nref=x\n\nec... value='ref=x\n!ref=ref\nNOW A NAMEREF\nref=foo\n!ref=x', shells=['mksh'], variant='N-I')], line_number=107, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: flag -n combined ${!ref} -- bash INVERTS (line 107)
E           
E           stdout mismatch:
E             expected: 'ref=x\n!ref=foo\nNOW A NAMEREF\nref=foo\n!ref=x'
E             actual:   'ref=x\n!ref=foo\nNOW A NAMEREF\nref=x\n!ref='
E           
E           Expected stdout: 'ref=x\n!ref=foo\nNOW A NAMEREF\nref=foo\n!ref=x'
E           Actual stdout:   'ref=x\n!ref=foo\nNOW A NAMEREF\nref=x\n!ref=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=FOO  # should NOT use this
E           
E           x=foo
E           ref=x
E           
E           echo ref=$ref
E           echo "!ref=${!ref}"
E           
E           echo 'NOW A NAMEREF'
E           
E           typeset -n ref
E           echo ref=$ref
E           echo "!ref=${!ref}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::named ref with 1 $1 etc.[L179]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109308fb0>
test_file = 'nameref.test.sh'
test_case = TestCase(name='named ref with 1 $1 etc.', script="set -- one two three\n\nx=X\n\nref='1'\necho ref=$ref\ntypeset -n re...Assertion(type='stdout', value='ref=1\nref=one\nref2=$1', shells=['mksh'], variant='BUG')], line_number=179, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: named ref with 1 $1 etc. (line 179)
E           
E           stdout mismatch:
E             expected: 'ref=1\nref=1\nref2=$1\nref2=$1\nref3=x\nref3=foo'
E             actual:   'ref=1\nref=1\nref2=$1\nref2=$1\nref3=x\nref3=x'
E           
E           Expected stdout: 'ref=1\nref=1\nref2=$1\nref2=$1\nref3=x\nref3=foo'
E           Actual stdout:   'ref=1\nref=1\nref2=$1\nref2=$1\nref3=x\nref3=x\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- one two three
E           
E           x=X
E           
E           ref='1'
E           echo ref=$ref
E           typeset -n ref
E           echo ref=$ref
E           
E           # BUG: This is really assigning '1', which is INVALID
E           # with strict_nameref that degrades!!!
E           ref2='$1'
E           echo ref2=$ref2
E           typeset -n ref2
E           echo ref2=$ref2
E           
E           x=foo
E           
E           ref3='x'
E           echo ref3=$ref3
E           typeset -n ref3
E           echo ref3=$ref3
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::assign to empty nameref and invalid nameref[L275]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093092b0>
test_file = 'nameref.test.sh'
test_case = TestCase(name='assign to empty nameref and invalid nameref', script='typeset -n ref\necho ref=$ref\n\n# this is a no-o...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='OK')], line_number=275, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assign to empty nameref and invalid nameref (line 275)
E           
E           stdout mismatch:
E             expected: 'ref=\nref=\nref2=\nref2=x'
E             actual:   'ref=\nref=x\nref2=\nref2=x'
E           
E           Expected stdout: 'ref=\nref=\nref2=\nref2=x'
E           Actual stdout:   'ref=\nref=x\nref2=\nref2=x\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -n ref
E           echo ref=$ref
E           
E           # this is a no-op in bash, should be stricter
E           ref=x
E           echo ref=$ref
E           
E           typeset -n ref2=undef
E           echo ref2=$ref2
E           ref2=x
E           echo ref2=$ref2
E           
E           
E           # mksh gives a good error: empty nameref target
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::-n attribute before it has a value[L299]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109309370>
test_file = 'nameref.test.sh'
test_case = TestCase(name='-n attribute before it has a value', script="typeset -n ref\n\necho ref=$ref\n\n# Now that it's a strin..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=299, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -n attribute before it has a value (line 299)
E           
E           stdout mismatch:
E             expected: 'ref=\nref=XX'
E             actual:   'ref=\nref=x'
E           
E           Expected stdout: 'ref=\nref=XX'
E           Actual stdout:   'ref=\nref=x\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -n ref
E           
E           echo ref=$ref
E           
E           # Now that it's a string, it still has the -n attribute
E           x=XX
E           ref=x
E           echo ref=$ref
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[nameref.test.sh::exported nameref[L337]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093094f0>
test_file = 'nameref.test.sh'
test_case = TestCase(name='exported nameref', script="x=foo\ntypeset -n -x ref=x\n\n# hm bash ignores it but mksh doesn't.  maybe ...sertion(type='stdout', value='None\nNone\n---\nfoo\nNone', shells=['mksh'], variant='OK')], line_number=337, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: exported nameref (line 337)
E           
E           stdout mismatch:
E             expected: 'None\nx\n---\nfoo\nx'
E             actual:   'foo\nNone\n---\nfoo\nNone'
E           
E           Expected stdout: 'None\nx\n---\nfoo\nx'
E           Actual stdout:   'foo\nNone\n---\nfoo\nNone\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=foo
E           typeset -n -x ref=x
E           
E           # hm bash ignores it but mksh doesn't.  maybe disallow it.
E           printenv.py x ref
E           echo ---
E           export x
E           printenv.py x ref
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::readonly var can't be assigned through nameref[L384]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109309670>
test_file = 'nameref.test.sh'
test_case = TestCase(name="readonly var can't be assigned through nameref", script="x=X\ntypeset -n -r ref=x\n\necho ref=$ref\n\n#...ertion(type='stdout', value='ref=X\nref=XX\nref=XX\nx=XX', shells=['bash'], variant='OK')], line_number=384, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: readonly var can't be assigned through nameref (line 384)
E           
E           stdout mismatch:
E             expected: 'ref=X\nref=XX\nref=XX\nx=XX'
E             actual:   'ref=X\nref=XX\nref=XXX\nx=XXX'
E           
E           Expected stdout: 'ref=X\nref=XX\nref=XX\nx=XX'
E           Actual stdout:   'ref=X\nref=XX\nref=XXX\nx=XXX\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           x=X
E           typeset -n -r ref=x
E           
E           echo ref=$ref
E           
E           # it feels like I shouldn't be able to mutate this?
E           ref=XX
E           echo ref=$ref
E           
E           # now the underling variable is immutable
E           typeset -r x
E           
E           ref=XXX
E           echo ref=$ref
E           echo x=$x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::Mutually recursive namerefs detected on WRITE[L462]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109309970>
test_file = 'nameref.test.sh'
test_case = TestCase(name='Mutually recursive namerefs detected on WRITE', script='typeset -n ref1=ref2\ntypeset -n ref2=ref1  # n...), Assertion(type='stdout', value='defined 0\nmutated 1', shells=['bash'], variant='BUG')], line_number=462, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Mutually recursive namerefs detected on WRITE (line 462)
E           
E           stdout mismatch:
E             expected: 'defined 0\nmutated 1'
E             actual:   'defined 0\nmutated 0'
E           
E           Expected stdout: 'defined 0\nmutated 1'
E           Actual stdout:   'defined 0\nmutated 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -n ref1=ref2
E           typeset -n ref2=ref1  # not detected here
E           echo defined $?
E           ref1=z  # detected here
E           echo mutated $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::Dynamic scope with namerefs[L479]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109309a30>
test_file = 'nameref.test.sh'
test_case = TestCase(name='Dynamic scope with namerefs', script='f3() {\n  local -n ref=$1\n  ref=x\n}\n\nf2() {\n  f3 "$@"\n}\n\n...f1', assertions=[Assertion(type='stdout', value='F1=F1\nF1=x', shells=None, variant=None)], line_number=479, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Dynamic scope with namerefs (line 479)
E           
E           stdout mismatch:
E             expected: 'F1=F1\nF1=x'
E             actual:   'F1=F1\nF1=F1'
E           
E           Expected stdout: 'F1=F1\nF1=x'
E           Actual stdout:   'F1=F1\nF1=F1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f3() {
E             local -n ref=$1
E             ref=x
E           }
E           
E           f2() {
E             f3 "$@"
E           }
E           
E           f1() {
E             local F1=F1
E             echo F1=$F1
E             f2 F1
E             echo F1=$F1
E           }
E           f1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::a[expr] in nameref[L546]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109309c70>
test_file = 'nameref.test.sh'
test_case = TestCase(name='a[expr] in nameref', script="# this confuses code and data\ntypeset -n ref='a[$(echo 2) + 1]'\na=(zero ...$ref", assertions=[Assertion(type='stdout', value='ref=three', shells=None, variant=None)], line_number=546, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: a[expr] in nameref (line 546)
E           
E           stdout mismatch:
E             expected: 'ref=three'
E             actual:   'ref=zero'
E           
E           Expected stdout: 'ref=three'
E           Actual stdout:   'ref=zero\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # this confuses code and data
E           typeset -n ref='a[$(echo 2) + 1]'
E           a=(zero one two three)
E           echo ref=$ref
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[nameref.test.sh::a[@] in nameref[L556]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109309d30>
test_file = 'nameref.test.sh'
test_case = TestCase(name='a[@] in nameref', script='# this confuses code and data\ntypeset -n ref=\'a[@]\'\na=(\'A B\' C)\nargv.p...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='OK')], line_number=556, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: a[@] in nameref (line 556)
E           
E           stdout mismatch:
E             expected: "['ref', 'A B', 'C']\nstatus=1\n['ref[@]']\n['ref', 'A B', 'C']\n['a[@]', 'A B', 'C']"
E             actual:   "['ref', 'A B C']\nstatus=0\n['ref[@]']\n['ref', 'A B C']\n['a[@]', 'A B', 'C']"
E           
E           Expected stdout: "['ref', 'A B', 'C']\nstatus=1\n['ref[@]']\n['ref', 'A B', 'C']\n['a[@]', 'A B', 'C']"
E           Actual stdout:   "['ref', 'A B C']\nstatus=0\n['ref[@]']\n['ref', 'A B C']\n['a[@]', 'A B', 'C']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # this confuses code and data
E           typeset -n ref='a[@]'
E           a=('A B' C)
E           argv.py ref "$ref"  # READ through ref works
E           ref=(X Y Z)    # WRITE through doesn't work
E           echo status=$?
E           argv.py 'ref[@]' "${ref[@]}"
E           argv.py ref "$ref"  # JOINING mangles the array?
E           argv.py 'a[@]' "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::bad mutation through nameref: ref[0]= where ref is array[0][L589]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109309eb0>
test_file = 'nameref.test.sh'
test_case = TestCase(name='bad mutation through nameref: ref[0]= where ref is array[0]', script="array=(X Y Z)\ntypeset -n ref='ar...one), Assertion(type='stdout', value='status=0\nfoo Y Z', shells=['mksh'], variant='BUG')], line_number=589, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bad mutation through nameref: ref[0]= where ref is array[0] (line 589)
E           
E           stdout mismatch:
E             expected: 'status=1\nX Y Z'
E             actual:   'status=0\nX Y Z'
E           
E           Expected stdout: 'status=1\nX Y Z'
E           Actual stdout:   'status=0\nX Y Z\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           array=(X Y Z)
E           typeset -n ref='array[0]'
E           ref[0]=foo  # error in bash: 'array[0]': not a valid identifier
E           echo status=$?
E           echo ${array[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::@ in nameref isn't supported, unlike in ${!ref}[L604]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109309f70>
test_file = 'nameref.test.sh'
test_case = TestCase(name="@ in nameref isn't supported, unlike in ${!ref}", script="set -- A B\ntypeset -n ref='@'  # bash gives ...Assertion(type='stdout', value='status=1\nref=\nstatus=0', shells=['bash'], variant='OK')], line_number=604, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: @ in nameref isn't supported, unlike in ${!ref} (line 604)
E           
E           stdout mismatch:
E             expected: 'status=1\nref=\nstatus=0'
E             actual:   'status=0\nref=A B\nstatus=0'
E           
E           Expected stdout: 'status=1\nref=\nstatus=0'
E           Actual stdout:   'status=0\nref=A B\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- A B
E           typeset -n ref='@'  # bash gives an error here
E           echo status=$?
E           
E           echo ref=$ref  # bash doesn't give an error here
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nameref.test.sh::Unquoted assoc reference on RHS[L621]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a030>
test_file = 'nameref.test.sh'
test_case = TestCase(name='Unquoted assoc reference on RHS', script='typeset -A bashup_ev_r\nbashup_ev_r[\'foo\']=bar\n\np() {\n  ...mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=621, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unquoted assoc reference on RHS (line 621)
E           
E           stdout mismatch:
E             expected: "['bar']"
E             actual:   "['bashup_ev_r[foo]']"
E           
E           Expected stdout: "['bar']"
E           Actual stdout:   "['bashup_ev_r[foo]']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -A bashup_ev_r
E           bashup_ev_r['foo']=bar
E           
E           p() {
E             local s=foo
E             local -n e=bashup_ev["$s"] f=bashup_ev_r["$s"]
E             # Different!
E             #local e=bashup_ev["$s"] f=bashup_ev_r["$s"]
E             argv.py "$f"
E           }
E           p
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nix-idioms.test.sh::var ref to array 'preHooks[@]'[L3]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a0f0>
test_file = 'nix-idioms.test.sh'
test_case = TestCase(name="var ref to array 'preHooks[@]'", script='#\n# This idiom discussed on\n# https://github.com/NixOS/nixpk..."show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n['foo bar', 'baz']", shells=None, variant=None)], line_number=3, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: var ref to array 'preHooks[@]' (line 3)
E           
E           stdout mismatch:
E             expected: "show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n['foo bar', 'baz']"
E             actual:   "show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n[]"
E           
E           Expected stdout: "show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n['foo bar', 'baz']"
E           Actual stdout:   "show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n[]\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           #
E           # This idiom discussed on
E           # https://github.com/NixOS/nixpkgs/pull/147629
E           
E           show() {
E             echo show
E           
E             # These are actually different
E             argv.py ${!hooksSlice}
E           
E             argv.py ${!hooksSlice+"${!hooksSlice}"}
E           }
E           
E           hooksSlice='preHooks[@]'
E           
E           preHooks=()
E           show
E           
E           preHooks=('foo bar' baz)
E           show
E           
E           # WTF this exposes a difference?  But not the test case below?
E           
E           # What's happening here?
E           # Uncomment this and get an error in bash about hookSlice, even though we never
E           # undefined it.
E           
E           #wtf=1
E           #
E           # line 6: !hooksSlice: unbound variable
E           
E           if test -n "$wtf"; then
E             # 4.4.0(1)-release
E             # echo $BASH_VERSION
E           
E             set -u
E             preHooks=()
E             show
E           
E             preHooks=('foo bar' baz)
E             show
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nix-idioms.test.sh::Similar to above with set -u[L56]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a1b0>
test_file = 'nix-idioms.test.sh'
test_case = TestCase(name='Similar to above with set -u', script='show() {\n  echo show\n\n  # bash gives an error here - !hookSli...tion(type='stdout', value="show\n[]\nshow\n['foo bar', 'baz']", shells=None, variant=None)], line_number=56, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Similar to above with set -u (line 56)
E           
E           stdout mismatch:
E             expected: "show\n[]\nshow\n['foo bar', 'baz']"
E             actual:   'show\n[]\nshow\n[]'
E           
E           Expected stdout: "show\n[]\nshow\n['foo bar', 'baz']"
E           Actual stdout:   'show\n[]\nshow\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show() {
E             echo show
E           
E             # bash gives an error here - !hookSlice unbound, even though preHooks exists
E             # OSH currently does the "logical" thing
E           
E             # NOT testing this -- I think this is WHAT NIX WORKS AROUND WITH
E             #argv.py ${!hooksSlice}
E           
E             argv.py ${!hooksSlice+"${!hooksSlice}"}
E           }
E           
E           hooksSlice='preHooks[@]'
E           
E           set -u
E           preHooks=()
E           show
E           
E           preHooks=('foo bar' baz)
E           show
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nix-idioms.test.sh::${!ref} to undefined string var is fatal, INCONSISTENT with array[L103]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a330>
test_file = 'nix-idioms.test.sh'
test_case = TestCase(name='${!ref} to undefined string var is fatal, INCONSISTENT with array', script="hookSlice='preHooks'\n\narg...ells=None, variant=None), Assertion(type='stdout', value='[]', shells=None, variant=None)], line_number=103, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!ref} to undefined string var is fatal, INCONSISTENT with array (line 103)
E           
E           stdout mismatch:
E             expected: '[]'
E             actual:   '[]\n[]\nend'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '[]'
E           Actual stdout:   '[]\n[]\nend\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           hookSlice='preHooks'
E           
E           argv.py ${!hookSlice}
E           
E           set -u
E           
E           argv.py ${!hookSlice}
E           
E           echo end
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nix-idioms.test.sh::export with dynamic var name +=[L119]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a3f0>
test_file = 'nix-idioms.test.sh'
test_case = TestCase(name='export with dynamic var name +=', script='orig() {\n  export NIX_LDFLAGS${role_post}+=" -L$1/lib64"\n}\...stdout', value="declare -x NIX_LDFLAGS_foo=' -Lone/lib64'", shells=['osh'], variant='OK')], line_number=119, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: export with dynamic var name += (line 119)
E           
E           stdout mismatch:
E             expected: 'declare -x NIX_LDFLAGS_foo=" -Lone/lib64"\ndeclare -x NIX_LDFLAGS_foo=" -Lone/lib64"'
E             actual:   'declare -- NIX_LDFLAGS_foo=" -Lone/lib64"'
E           
E           Expected stdout: 'declare -x NIX_LDFLAGS_foo=" -Lone/lib64"\ndeclare -x NIX_LDFLAGS_foo=" -Lone/lib64"'
E           Actual stdout:   'declare -- NIX_LDFLAGS_foo=" -Lone/lib64"\n'
E           Expected stderr: None
E           Actual stderr:   "bash: export: 'NIX_LDFLAGS_foo+': not a valid identifier\nbash: declare: NIX_LDFLAGS_foo: not found\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           orig() {
E             export NIX_LDFLAGS${role_post}+=" -L$1/lib64"
E           }
E           
E           new() {
E             local var_name="NIX_LDFLAGS$role_post"
E             local value=" -L$1/lib64"
E           
E             eval "$var_name"+='$value'
E             export "$var_name"
E           }
E           
E           role_post='_foo'
E           
E           # set -u
E           
E           if test -n "${BASH_VERSION:-}"; then
E             orig one
E           fi
E           
E           declare -p NIX_LDFLAGS_foo  # inspect it
E           unset NIX_LDFLAGS_foo
E           
E           new one
E           
E           declare -p NIX_LDFLAGS_foo  # inspect it
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nix-idioms.test.sh::let idiom can be written in POSIX shell - eval ": \\$(( ))"[L156]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a4b0>
test_file = 'nix-idioms.test.sh'
test_case = TestCase(name='let idiom can be written in POSIX shell - eval ": \\$(( ))"', script='for i in 0 1 2; do\n  echo i=$i\n...NATIVE=2\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=3\n', shells=None, variant=None)], line_number=156, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: let idiom can be written in POSIX shell - eval ": \$(( ))" (line 156)
E           
E           stdout mismatch:
E             expected: 'i=0\nNIX_ENFORCE_NO_NATIVE=0\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=0\n\ni=1\nNIX_ENFORCE_NO_NATIVE=1\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=1\n\ni=2\nNIX_ENFORCE_NO_NATIVE=2\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=3'
E             actual:   'i=0\nNIX_ENFORCE_NO_NATIVE=0\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=\n\ni=1\nNIX_ENFORCE_NO_NATIVE=1\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=\n\ni=2\nNIX_ENFORCE_NO_NATIVE=2\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu='
E           
E           Expected stdout: 'i=0\nNIX_ENFORCE_NO_NATIVE=0\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=0\n\ni=1\nNIX_ENFORCE_NO_NATIVE=1\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=1\n\ni=2\nNIX_ENFORCE_NO_NATIVE=2\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=3\n'
E           Actual stdout:   'i=0\nNIX_ENFORCE_NO_NATIVE=0\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=\n\ni=1\nNIX_ENFORCE_NO_NATIVE=1\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=\n\ni=2\nNIX_ENFORCE_NO_NATIVE=2\nNIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           for i in 0 1 2; do
E             echo i=$i
E           
E             NIX_ENFORCE_NO_NATIVE=$i
E           
E             outputVar=NIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu
E             inputVar=NIX_ENFORCE_NO_NATIVE
E           
E             # Original Nix idiom
E           
E             if test -n "$BASH_VERSION"; then
E               let "${outputVar} |= ${!inputVar:-0}" "1"
E             else
E               # OSH alternative
E               eval ": \$(( ${outputVar} |= ${!inputVar:-0} ))"
E             fi
E           
E             echo NIX_ENFORCE_NO_NATIVE=$NIX_ENFORCE_NO_NATIVE
E             echo NIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu=$NIX_ENFORCE_NO_NATIVE_x86_64_unknown_linux_gnu
E             echo
E           
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nocasematch-match.test.sh::[[ equality matching[L6]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a570>
test_file = 'nocasematch-match.test.sh'
test_case = TestCase(name='[[ equality matching', script='shopt -s nocasematch\n[[ a == A ]]; echo $?\n[[ A == a ]]; echo $?\n[[ A...ho $?', assertions=[Assertion(type='stdout', value='0\n0\n0\n0', shells=None, variant=None)], line_number=6, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ equality matching (line 6)
E           
E           stdout mismatch:
E             expected: '0\n0\n0\n0'
E             actual:   '1\n1\n1\n1'
E           
E           Expected stdout: '0\n0\n0\n0'
E           Actual stdout:   '1\n1\n1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s nocasematch
E           [[ a == A ]]; echo $?
E           [[ A == a ]]; echo $?
E           [[ A == [a] ]]; echo $?
E           [[ a == [A] ]]; echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nocasematch-match.test.sh::[[ regex matching[L19]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a630>
test_file = 'nocasematch-match.test.sh'
test_case = TestCase(name='[[ regex matching', script='shopt -s nocasematch\n[[ a =~ A ]]; echo $?\n[[ A =~ a ]]; echo $?\n[[ a =~...o $?', assertions=[Assertion(type='stdout', value='0\n0\n0\n0', shells=None, variant=None)], line_number=19, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ regex matching (line 19)
E           
E           stdout mismatch:
E             expected: '0\n0\n0\n0'
E             actual:   '1\n1\n1\n1'
E           
E           Expected stdout: '0\n0\n0\n0'
E           Actual stdout:   '1\n1\n1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s nocasematch
E           [[ a =~ A ]]; echo $?
E           [[ A =~ a ]]; echo $?
E           [[ a =~ [A] ]]; echo $?
E           [[ A =~ [a] ]]; echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nocasematch-match.test.sh::case matching[L41]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a7b0>
test_file = 'nocasematch-match.test.sh'
test_case = TestCase(name='case matching', script='shopt -s nocasematch\ncase a in A) echo 0 ;; *) echo 1 ;; esac\ncase A in a) ec...esac', assertions=[Assertion(type='stdout', value='0\n0\n0\n0', shells=None, variant=None)], line_number=41, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: case matching (line 41)
E           
E           stdout mismatch:
E             expected: '0\n0\n0\n0'
E             actual:   '1\n1\n1\n1'
E           
E           Expected stdout: '0\n0\n0\n0'
E           Actual stdout:   '1\n1\n1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s nocasematch
E           case a in A) echo 0 ;; *) echo 1 ;; esac
E           case A in a) echo 0 ;; *) echo 1 ;; esac
E           case a in [A]) echo 0 ;; *) echo 1 ;; esac
E           case A in [a]) echo 0 ;; *) echo 1 ;; esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::NUL bytes with echo -e[L5]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930a9f0>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='NUL bytes with echo -e', script="case $SH in dash) exit ;; esac\n\nshow_hex() { od -A n -t c -t x1; }\n...['zsh'], variant='BUG'), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=5, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: NUL bytes with echo -e (line 5)
E           
E           stdout mismatch:
E             expected: '  \\0   -  \\n\n  00  2d  0a'
E             actual:   '  00  2d  0a'
E           
E           Expected stdout: '  \\0   -  \\n\n  00  2d  0a'
E           Actual stdout:   '  00  2d  0a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           show_hex() { od -A n -t c -t x1; }
E           
E           echo -e '\0-' | show_hex
E           #echo -e '\x00-'
E           #echo -e '\000-'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::printf - literal NUL in format string[L27]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930aab0>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='printf - literal NUL in format string', script="case $SH in dash|ash) return ;; esac\n\n# Show both pri... variant='BUG'), Assertion(type='stdout', value='', shells=['dash', 'ash'], variant='N-I')], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf - literal NUL in format string (line 27)
E           
E           stdout mismatch:
E             expected: '   x\n  78\n---\n   x\n  78\n---'
E             actual:   '  78  7a\n---\n  78  7a\n---\n  7a'
E           
E           Expected stdout: '   x\n  78\n---\n   x\n  78\n---'
E           Actual stdout:   '  78  7a\n---\n  78  7a\n---\n  7a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|ash) return ;; esac
E           
E           # Show both printable and hex
E           show_hex() { od -A n -t c -t x1; }
E           
E           printf $'x\U0z' | show_hex
E           echo ---
E           
E           printf $'x\U00z' | show_hex
E           echo ---
E           
E           printf $'\U0z' | show_hex
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::printf - \\0 escape shows NUL byte[L63]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930ab70>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='printf - \\0 escape shows NUL byte', script="show_hex() { od -A n -t c -t x1; }\n\nprintf '\\0\\n' | sh...ertions=[Assertion(type='stdout', value='  \\0  \\n\n  00  0a', shells=None, variant=None)], line_number=63, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf - \0 escape shows NUL byte (line 63)
E           
E           stdout mismatch:
E             expected: '  \\0  \\n\n  00  0a'
E             actual:   '  00  0a'
E           
E           Expected stdout: '  \\0  \\n\n  00  0a'
E           Actual stdout:   '  00  0a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_hex() { od -A n -t c -t x1; }
E           
E           printf '\0\n' | show_hex
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::printf - NUL byte in value (OSH and zsh agree)[L72]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930ac30>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='printf - NUL byte in value (OSH and zsh agree)', script='case $SH in dash) exit ;; esac\nshow_hex() { o...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=72, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf - NUL byte in value (OSH and zsh agree) (line 72)
E           
E           stdout mismatch:
E             expected: '  \\n\n  0a\n  \\n\n  0a'
E             actual:   '  0a\n  0a'
E           
E           Expected stdout: '  \\n\n  0a\n  \\n\n  0a'
E           Actual stdout:   '  0a\n  0a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           show_hex() { od -A n -t c -t x1; }
E           
E           nul=$'\0'
E           echo "$nul" | show_hex
E           printf '%s\n' "$nul" | show_hex
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::NUL bytes with echo $'\\0' (OSH and zsh agree)[L96]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930acf0>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name="NUL bytes with echo $'\\0' (OSH and zsh agree)", script="case $SH in dash) exit ;; esac\nshow_hex() { o...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=96, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: NUL bytes with echo $'\0' (OSH and zsh agree) (line 96)
E           
E           stdout mismatch:
E             expected: '  \\n\n  0a'
E             actual:   '  0a'
E           
E           Expected stdout: '  \\n\n  0a'
E           Actual stdout:   '  0a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           show_hex() { od -A n -t c -t x1; }
E           
E           # OSH agrees with ZSH -- so you have the ability to print NUL bytes without
E           # legacy echo -e
E           
E           echo $'\0' | show_hex
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::NUL bytes and IFS splitting[L119]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930adb0>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='NUL bytes and IFS splitting', script='case $SH in dash) exit ;; esac\n\nargv.py $(echo -e \'\\0\')\narg...zsh'], variant='BUG'), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=119, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: NUL bytes and IFS splitting (line 119)
E           
E           stdout mismatch:
E             expected: "[]\n['']\n['ab']\n['ab']"
E             actual:   "['\x00']\n['\x00']\n['a\x00b']\n['a\x00b']"
E           
E           Expected stdout: "[]\n['']\n['ab']\n['ab']"
E           Actual stdout:   "['\x00']\n['\x00']\n['a\x00b']\n['a\x00b']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           argv.py $(echo -e '\0')
E           argv.py "$(echo -e '\0')"
E           argv.py $(echo -e 'a\0b')
E           argv.py "$(echo -e 'a\0b')"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::NUL bytes with test -f[L174]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930af30>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='NUL bytes with test -f', script="case $SH in dash) exit ;; esac\n\n\ntest -f $'\\0'\necho status=$?\n\n...'ash'], variant='OK'), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=174, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: NUL bytes with test -f (line 174)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0\nstatus=0\nstatus=1'
E             actual:   'status=1\nstatus=0\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=0\nstatus=0\nstatus=1'
E           Actual stdout:   'status=1\nstatus=0\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           
E           test -f $'\0'
E           echo status=$?
E           
E           touch foo
E           test -f $'foo\0'
E           echo status=$?
E           
E           test -f $'foo\0bar'
E           echo status=$?
E           
E           test -f $'foobar'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::Compare \\x00 byte versus \\x01 byte - command sub[L237]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b0b0>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='Compare \\x00 byte versus \\x01 byte - command sub', script='# https://stackoverflow.com/questions/3272...', value='len=3\n 2e 01 2e\nlen=3\n 2e 00 2e\nlen=1\n 00', shells=['zsh'], variant='BUG')], line_number=237, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Compare \x00 byte versus \x01 byte - command sub (line 237)
E           
E           stdout mismatch:
E             expected: 'len=3\n 2e 01 2e\nlen=2\n 2e 2e\nlen=0'
E             actual:   'len=3\n  2e  01  2e\nlen=3\n  2e  00  2e\nlen=1\n  00'
E           
E           Expected stdout: 'len=3\n 2e 01 2e\nlen=2\n 2e 2e\nlen=0'
E           Actual stdout:   'len=3\n  2e  01  2e\nlen=3\n  2e  00  2e\nlen=1\n  00\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # https://stackoverflow.com/questions/32722007/is-skipping-ignoring-nul-bytes-on-process-substitution-standardized
E           # bash contains a warning!
E           
E           show_bytes() {
E             echo -n "$1" | od -A n -t x1
E           }
E           
E           s=$(printf '.\001.')
E           echo len=${#s}
E           show_bytes "$s"
E           
E           s=$(printf '.\000.')
E           echo len=${#s}
E           show_bytes "$s"
E           
E           s=$(printf '\000')
E           echo len=${#s} 
E           show_bytes "$s"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::Compare \\x00 byte versus \\x01 byte - read builtin[L276]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b170>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='Compare \\x00 byte versus \\x01 byte - read builtin', script='# Hm same odd behavior\n\nshow_string() {...', value='len=3\n 2e 01 2e\nlen=3\n 2e 00 2e\nlen=1\n 00', shells=['zsh'], variant='BUG')], line_number=276, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Compare \x00 byte versus \x01 byte - read builtin (line 276)
E           
E           stdout mismatch:
E             expected: 'len=3\n 2e 01 2e\nlen=2\n 2e 2e\nlen=0'
E             actual:   'len=3\n  2e  01  2e\nlen=3\n  2e  00  2e\nlen=1\n  00'
E           
E           Expected stdout: 'len=3\n 2e 01 2e\nlen=2\n 2e 2e\nlen=0'
E           Actual stdout:   'len=3\n  2e  01  2e\nlen=3\n  2e  00  2e\nlen=1\n  00\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Hm same odd behavior
E           
E           show_string() {
E             read s
E             echo len=${#s}
E             echo -n "$s" | od -A n -t x1
E           }
E           
E           printf '.\001.' | show_string
E           
E           printf '.\000.' | show_string
E           
E           printf '\000' | show_string
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::Compare \\x00 byte versus \\x01 byte - read -n[L310]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b230>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='Compare \\x00 byte versus \\x01 byte - read -n', script='case $SH in dash) exit ;; esac\n\nshow_string(...zsh'], variant='BUG'), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=310, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Compare \x00 byte versus \x01 byte - read -n (line 310)
E           
E           stdout mismatch:
E             expected: 'len=3\n 2e 01 2e\nlen=2\n 2e 2e\nlen=0'
E             actual:   'len=3\n  2e  01  2e\nlen=3\n  2e  00  2e\nlen=1\n  00'
E           
E           Expected stdout: 'len=3\n 2e 01 2e\nlen=2\n 2e 2e\nlen=0'
E           Actual stdout:   'len=3\n  2e  01  2e\nlen=3\n  2e  00  2e\nlen=1\n  00\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           show_string() {
E             read -n 3 s
E             echo len=${#s}
E             echo -n "$s" | od -A n -t x1
E           }
E           
E           
E           printf '.\001.' | show_string
E           
E           printf '.\000.' | show_string
E           
E           printf '\000' | show_string
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::Compare \\x00 byte versus \\x01 byte - mapfile builtin[L354]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b2f0>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='Compare \\x00 byte versus \\x01 byte - mapfile builtin', script='case $SH in dash|mksh|zsh|ash) exit ;;... Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh', 'ash'], variant='N-I')], line_number=354, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Compare \x00 byte versus \x01 byte - mapfile builtin (line 354)
E           
E           stdout mismatch:
E             expected: 'len=2\n 2e\n 2e'
E             actual:   'len=2\n  2e  00  2e\n  2e  00  2e'
E           
E           Expected stdout: 'len=2\n 2e\n 2e'
E           Actual stdout:   'len=2\n  2e  00  2e\n  2e  00  2e\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh|zsh|ash) exit ;; esac
E           
E           { 
E             printf '.\000.\n'
E             printf '.\000.\n'
E           } |
E           { mapfile LINES
E             echo len=${#LINES[@]}
E             for line in ${LINES[@]}; do
E               echo -n "$line" | od -A n -t x1
E             done
E           }
E           
E           # bash is INCONSISTENT:
E           # - it TRUNCATES at \0, with 'mapfile'
E           # - rather than just IGNORING \0, with 'read'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::Strip ops # ## % %% with NUL bytes[L381]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b3b0>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='Strip ops # ## % %% with NUL bytes', script='show_bytes() {\n  echo -n "$1" | od -A n -t x1\n}\n\ns=$(p...n=2\n 2e 00\nlen=2\n 2e 00\nlen=2\n 00 2e\nlen=2\n 00 2e', shells=['zsh'], variant='BUG')], line_number=381, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Strip ops # ## % %% with NUL bytes (line 381)
E           
E           stdout mismatch:
E             expected: 'len=1\n 2e\n---\nlen=0\nlen=0\nlen=0\nlen=0'
E             actual:   'len=3\n  00  2e  00\n---\nlen=2\n  2e  00\nlen=2\n  2e  00\nlen=2\n  00  2e\nlen=2\n  00  2e'
E           
E           Expected stdout: 'len=1\n 2e\n---\nlen=0\nlen=0\nlen=0\nlen=0'
E           Actual stdout:   'len=3\n  00  2e  00\n---\nlen=2\n  2e  00\nlen=2\n  2e  00\nlen=2\n  00  2e\nlen=2\n  00  2e\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_bytes() {
E             echo -n "$1" | od -A n -t x1
E           }
E           
E           s=$(printf '\000.\000')
E           echo len=${#s}
E           show_bytes "$s"
E           
E           echo ---
E           
E           t=${s#?}
E           echo len=${#t}
E           show_bytes "$t"
E           
E           t=${s##?}
E           echo len=${#t}
E           show_bytes "$t"
E           
E           t=${s%?}
E           echo len=${#t}
E           show_bytes "$t"
E           
E           t=${s%%?}
E           echo len=${#t}
E           show_bytes "$t"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::Issue 2269 Reduction[L434]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b470>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='Issue 2269 Reduction', script='show_bytes() {\n  echo -n "$1" | od -A n -t x1\n}\n\ns=$(printf \'\\000x...en=2\n 00 78\nlen=1\n 78\n---\nlen=2\n 01 78\nlen=1\n 78', shells=['zsh'], variant='BUG')], line_number=434, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Issue 2269 Reduction (line 434)
E           
E           stdout mismatch:
E             expected: 'len=1\n 78\nlen=0\n---\nlen=2\n 01 78\nlen=1\n 78'
E             actual:   'len=2\n  00  78\nlen=1\n  78\n---\nlen=2\n  01  78\nlen=1\n  78'
E           
E           Expected stdout: 'len=1\n 78\nlen=0\n---\nlen=2\n 01 78\nlen=1\n 78'
E           Actual stdout:   'len=2\n  00  78\nlen=1\n  78\n---\nlen=2\n  01  78\nlen=1\n  78\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_bytes() {
E             echo -n "$1" | od -A n -t x1
E           }
E           
E           s=$(printf '\000x')
E           echo len=${#s}
E           show_bytes "$s"
E           
E           # strip one char from the front
E           s=${s#?}
E           echo len=${#s}
E           show_bytes "$s"
E           
E           echo ---
E           
E           s=$(printf '\001x')
E           echo len=${#s}
E           show_bytes "$s"
E           
E           # strip one char from the front
E           s=${s#?}
E           echo len=${#s}
E           show_bytes "$s"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[nul-bytes.test.sh::Issue 2269 - Do NUL bytes match ? in ${a#?}[L484]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b530>
test_file = 'nul-bytes.test.sh'
test_case = TestCase(name='Issue 2269 - Do NUL bytes match ? in ${a#?}', script='# https://github.com/oils-for-unix/oils/issues/22...d that\'"\'"\'s it!\nthat\'s it!\n---\nwrites binary data', shells=['zsh'], variant='OK')], line_number=484, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Issue 2269 - Do NUL bytes match ? in ${a#?} (line 484)
E           
E           stdout mismatch:
E             expected: 'escaped that\'"\'"\'s it!\nthat\'s it!\n---\nescaped ::'
E             actual:   'escaped thats it!")"\nthats it!")"\n---\nescaped :\x00:'
E           
E           Expected stdout: 'escaped that\'"\'"\'s it!\nthat\'s it!\n---\nescaped ::'
E           Actual stdout:   'escaped thats it!")"\nthats it!")"\n---\nescaped :\x00:\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # https://github.com/oils-for-unix/oils/issues/2269
E           
E           escape_arg() {
E           	a="$1"
E           	until [ -z "$a" ]; do
E           		case "$a" in
E           		(\'*) printf "'\"'\"'";;
E           		(*) printf %.1s "$a";;
E           		esac
E           		a="${a#?}"
E               echo len=${#a} >&2
E           	done
E           }
E           
E           # encode
E           phrase="$(escape_arg "that's it!")"
E           echo escaped "$phrase"
E           
E           # decode
E           eval "printf '%s\\n' '$phrase'"
E           
E           echo ---
E           
E           # harder input: NUL surrounded with ::
E           arg="$(printf ':\000:')" 
E           #echo "arg=$arg"
E           
E           case $SH in
E             zsh) echo 'writes binary data' ;;
E             *) echo escaped "$(escape_arg "$arg")" ;;
E           esac
E           #echo "arg=$arg"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[paren-ambiguity.test.sh::(( closed with ) ) after multiple lines is command - #2337[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b5f0>
test_file = 'paren-ambiguity.test.sh'
test_case = TestCase(name='(( closed with ) ) after multiple lines is command - #2337', script='(( echo 1\necho 2\n(( x ))\n: $(( ...o 3\n) )', assertions=[Assertion(type='stdout', value='1\n2\n3', shells=None, variant=None)], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: (( closed with ) ) after multiple lines is command - #2337 (line 4)
E           
E           stdout mismatch:
E             expected: '1\n2\n3'
E             actual:   ''
E           
E           Expected stdout: '1\n2\n3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           (( echo 1
E           echo 2
E           (( x ))
E           : $(( x ))
E           echo 3
E           ) )
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[paren-ambiguity.test.sh::$(( closed with ) ) after multiple lines is command - #2337[L18]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b6b0>
test_file = 'paren-ambiguity.test.sh'
test_case = TestCase(name='$(( closed with ) ) after multiple lines is command - #2337', script='echo $(( echo 1\necho 2\n(( x ))\... variant='BUG'), Assertion(type='stdout', value='', shells=['dash', 'ash'], variant='BUG')], line_number=18, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $(( closed with ) ) after multiple lines is command - #2337 (line 18)
E           
E           stdout mismatch:
E             expected: '1 2 3'
E             actual:   '0'
E           
E           Expected stdout: '1 2 3'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $(( echo 1
E           echo 2
E           (( x ))
E           : $(( x ))
E           echo 3
E           ) )
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[paren-ambiguity.test.sh::$(( closed with )) after multiple lines is parse error - #2337[L56]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930b830>
test_file = 'paren-ambiguity.test.sh'
test_case = TestCase(name='$(( closed with )) after multiple lines is parse error - #2337', script="$SH -c '\necho $(( echo 1\nech... echo ok\nfi", assertions=[Assertion(type='stdout', value='ok', shells=None, variant=None)], line_number=56, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $(( closed with )) after multiple lines is parse error - #2337 (line 56)
E           
E           stdout mismatch:
E             expected: 'ok'
E             actual:   '0'
E           
E           Expected stdout: 'ok'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c '
E           echo $(( echo 1
E           echo 2
E           (( x ))
E           : $(( x ))
E           echo 3
E           ))
E           '
E           if test $? -ne 0; then
E             echo ok
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[paren-ambiguity.test.sh::$((which example - command sub versus arith sub - gnunet-gtk package[L130]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930bbf0>
test_file = 'paren-ambiguity.test.sh'
test_case = TestCase(name='$((which example - command sub versus arith sub - gnunet-gtk package', script='gtk_update_icon_cache_bi...variant='N-I'), Assertion(type='stdout', value='', shells=['dash', 'ash'], variant='N-I')], line_number=130, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $((which example - command sub versus arith sub - gnunet-gtk package (line 130)
E           
E           stdout mismatch:
E             expected: 'bye'
E             actual:   ''
E           stderr mismatch:
E             expected: ''
E             actual:   'bash: division by 0'
E           
E           Expected stdout: 'bye'
E           Actual stdout:   ''
E           Expected stderr: ''
E           Actual stderr:   'bash: division by 0\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           gtk_update_icon_cache_bin="$((which gtk-update-icon-cache ||
E           echo /opt/gnome/bin/gtk-update-icon-cache)2>/dev/null)"
E           
E           echo bye
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::Long Token - 65535 bytes[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930bcb0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='Long Token - 65535 bytes', script='python2 -c \'print("echo -n %s" % ("x" * 65535))\' > tmp.sh\n$SH tmp...es out', assertions=[Assertion(type='stdout', value='65535 out', shells=None, variant=None)], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Long Token - 65535 bytes (line 4)
E           
E           stdout mismatch:
E             expected: '65535 out'
E             actual:   '0 out'
E           
E           Expected stdout: '65535 out'
E           Actual stdout:   '0 out\n'
E           Expected stderr: None
E           Actual stderr:   'bash: python2: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           python2 -c 'print("echo -n %s" % ("x" * 65535))' > tmp.sh
E           $SH tmp.sh > out
E           wc --bytes out
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::Token that's too long for Oils - 65536 bytes[L15]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930bd70>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name="Token that's too long for Oils - 65536 bytes", script='python2 -c \'print("echo -n %s" % ("x" * 65536))...sertions=[Assertion(type='stdout', value='status=0\n65536 out', shells=None, variant=None)], line_number=15, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Token that's too long for Oils - 65536 bytes (line 15)
E           
E           stdout mismatch:
E             expected: 'status=0\n65536 out'
E             actual:   'status=0\n0 out'
E           
E           Expected stdout: 'status=0\n65536 out'
E           Actual stdout:   'status=0\n0 out\n'
E           Expected stderr: None
E           Actual stderr:   'bash: python2: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           python2 -c 'print("echo -n %s" % ("x" * 65536))' > tmp.sh
E           $SH tmp.sh > out
E           echo status=$?
E           wc --bytes out
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::Bad braced var sub -- not allowed[L32]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10930bef0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='Bad braced var sub -- not allowed', script='echo ${%}', assertions=[Assertion(type='status', value=2, s...e, variant=None), Assertion(type='status', value=1, shells=['bash', 'mksh'], variant='OK')], line_number=32, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Bad braced var sub -- not allowed (line 32)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: None
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           echo ${%}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::Incomplete while[L46]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093300b0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='Incomplete while', script='echo hi; while\necho status=$?', assertions=[Assertion(type='status', value=...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=46, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Incomplete while (line 46)
E           
E           Execution error: Expected 'do' after condition at line 2, column 15
E           
E           
E           Script:
E           ---
E           echo hi; while
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::Incomplete for[L53]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330170>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='Incomplete for', script='echo hi; for\necho status=$?', assertions=[Assertion(type='status', value=2, s...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=53, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Incomplete for (line 53)
E           
E           Execution error: Expected 'do' in for loop at line 2, column 6
E           
E           
E           Script:
E           ---
E           echo hi; for
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[parse-errors.test.sh::Incomplete if[L60]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330230>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='Incomplete if', script='echo hi; if\necho status=$?', assertions=[Assertion(type='status', value=2, she...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=60, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Incomplete if (line 60)
E           
E           Execution error: Expected 'then' after condition at line 2, column 15
E           
E           
E           Script:
E           ---
E           echo hi; if
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[parse-errors.test.sh::do unexpected[L67]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093302f0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='do unexpected', script='do echo hi', assertions=[Assertion(type='status', value=2, shells=None, variant...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=67, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: do unexpected (line 67)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 1
E           
E           
E           Script:
E           ---
E           do echo hi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::} is a parse error[L73]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093303b0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='} is a parse error', script='}\necho should not get here', assertions=[Assertion(type='stdout-json', va...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=73, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: } is a parse error (line 73)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 1
E           
E           
E           Script:
E           ---
E           }
E           echo should not get here
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::{ is its own word, needs a space[L80]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330470>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='{ is its own word, needs a space', script='# bash and mksh give parse time error because of }\n# dash g...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=80, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: { is its own word, needs a space (line 80)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 3, column 6
E           
E           
E           Script:
E           ---
E           # bash and mksh give parse time error because of }
E           # dash gives 127 as runtime error
E           {ls; }
E           echo "status=$?"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::} on the second line[L89]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330530>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='} on the second line', script='set -o errexit\n{ls;\n}', assertions=[Assertion(type='status', value=127, shells=None, variant=None)], line_number=89, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: } on the second line (line 89)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 3, column 1
E           
E           
E           Script:
E           ---
E           set -o errexit
E           {ls;
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::bad var name globally isn't parsed like an assignment[L106]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093306b0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name="bad var name globally isn't parsed like an assignment", script='# bash and dash disagree on exit code.\nFOO-BAR=foo', assertions=[Assertion(type='status', value=127, shells=None, variant=None)], line_number=106, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bad var name globally isn't parsed like an assignment (line 106)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: FOO-BAR=foo: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           # bash and dash disagree on exit code.
E           FOO-BAR=foo
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::misplaced parentheses are not a subshell[L126]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093308f0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='misplaced parentheses are not a subshell', script='echo a(b)', assertions=[Assertion(type='status', val...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=126, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: misplaced parentheses are not a subshell (line 126)
E           
E           status mismatch: expected 2, got 1
E           
E           Expected stdout: None
E           Actual stdout:   'a\n'
E           Expected stderr: None
E           Actual stderr:   'bash: b: command not found\n'
E           Expected status: 2
E           Actual status:   1
E           
E           Script:
E           ---
E           echo a(b)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::incomplete command sub[L131]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093309b0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='incomplete command sub', script='$(x', assertions=[Assertion(type='status', value=2, shells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=131, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: incomplete command sub (line 131)
E           
E           status mismatch: expected 2, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\n'
E           Expected status: 2
E           Actual status:   1
E           
E           Script:
E           ---
E           $(x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::incomplete backticks[L136]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330a70>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='incomplete backticks', script='`x', assertions=[Assertion(type='status', value=2, shells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=136, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: incomplete backticks (line 136)
E           
E           status mismatch: expected 2, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\n'
E           Expected status: 2
E           Actual status:   1
E           
E           Script:
E           ---
E           `x
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[parse-errors.test.sh::misplaced ;;[L141]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330b30>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='misplaced ;;', script='echo 1 ;; echo 2', assertions=[Assertion(type='stdout-json', value='', shells=No...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=141, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: misplaced ;; (line 141)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 8
E           
E           
E           Script:
E           ---
E           echo 1 ;; echo 2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::empty clause in [[[L147]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330bf0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='empty clause in [[', script='# regression test for commit 451ca9e2b437e0326fc8155783d970a6f32729d8\n[[ ...'dash'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=147, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: empty clause in [[ (line 147)
E           
E           Execution error: Expected conditional expression at line 2, column 4
E           
E           
E           Script:
E           ---
E           # regression test for commit 451ca9e2b437e0326fc8155783d970a6f32729d8
E           [[ || true ]]
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::interactive parse error (regression)[L156]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330cb0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='interactive parse error (regression)', script="flags=''\ncase $SH in\n  bash*|*osh)\n    flags='--rcfil...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=156, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: interactive parse error (regression) (line 156)
E           
E           status mismatch: expected 2, got 127
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: --rcfile: No such file or directory\n'
E           Expected status: 2
E           Actual status:   127
E           
E           Script:
E           ---
E           flags=''
E           case $SH in
E             bash*|*osh)
E               flags='--rcfile /dev/null'
E               ;;
E           esac
E           $SH $flags -i -c 'var=)'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::array literal inside array is a parse error[L169]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330d70>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='array literal inside array is a parse error', script='a=( inside=() )\necho len=${#a[@]}', assertions=[..., variant='BUG'), Assertion(type='stdout', value='len=0', shells=['bash'], variant='BUG')], line_number=169, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array literal inside array is a parse error (line 169)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 15
E           
E           
E           Script:
E           ---
E           a=( inside=() )
E           echo len=${#a[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::array literal inside loop is a parse error[L178]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330e30>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='array literal inside loop is a parse error', script='f() {\n  for x in a=(); do\n    echo x=$x\n  done\...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=178, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array literal inside loop is a parse error (line 178)
E           
E           Execution error: Expected 'do' in for loop at line 2, column 12
E           
E           
E           Script:
E           ---
E           f() {
E             for x in a=(); do
E               echo x=$x
E             done
E             echo done
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::array literal in case[L190]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330ef0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='array literal in case', script='f() {\n  case a=() in\n    foo)\n      echo hi\n      ;;\n  esac\n}\nf'...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=190, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array literal in case (line 190)
E           
E           Execution error: Expected word after 'case' at line 2, column 8
E           
E           
E           Script:
E           ---
E           f() {
E             case a=() in
E               foo)
E                 echo hi
E                 ;;
E             esac
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[parse-errors.test.sh::%foo=() is parse error (regression)[L203]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109330fb0>
test_file = 'parse-errors.test.sh'
test_case = TestCase(name='%foo=() is parse error (regression)', script='# Lit_VarLike and then (, but NOT at the beginning of a w...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=203, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: %foo=() is parse error (regression) (line 203)
E           
E           Execution error: Expected compound command as function body at line 5, column 1
E           
E           
E           Script:
E           ---
E           # Lit_VarLike and then (, but NOT at the beginning of a word.
E           
E           f() {
E             %foo=()
E           }
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::While Loop ends pipeline[L27]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093312b0>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='While Loop ends pipeline', script='seq 3 | while read i\ndo\n  echo ".$i"\ndone', assertions=[Assertion(type='stdout', value='.1\n.2\n.3', shells=None, variant=None)], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: While Loop ends pipeline (line 27)
E           
E           stdout mismatch:
E             expected: '.1\n.2\n.3'
E             actual:   ''
E           
E           Expected stdout: '.1\n.2\n.3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           seq 3 | while read i
E           do
E             echo ".$i"
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::Initial value of PIPESTATUS is empty string[L53]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093315b0>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='Initial value of PIPESTATUS is empty string', script='case $SH in dash|zsh) exit ;; esac\n\necho pipest... variant='BUG'), Assertion(type='stdout', value='', shells=['dash', 'zsh'], variant='N-I')], line_number=53, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Initial value of PIPESTATUS is empty string (line 53)
E           
E           stdout mismatch:
E             expected: 'pipestatus'
E             actual:   'pipestatus 0'
E           
E           Expected stdout: 'pipestatus'
E           Actual stdout:   'pipestatus 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh) exit ;; esac
E           
E           echo pipestatus ${PIPESTATUS[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_________ TestBashSpecTests.test_spec_case[pipeline.test.sh::|&[L115]] _________

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093318b0>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='|&', script='stdout_stderr.py |& cat', assertions=[Assertion(type='stdout', value='STDERR\nSTDOUT', she...['osh'], variant='N-I'), Assertion(type='status', value=1, shells=['osh'], variant='N-I')], line_number=115, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: |& (line 115)
E           
E           stdout mismatch:
E             expected: 'STDERR\nSTDOUT'
E             actual:   'STDOUT'
E           
E           Expected stdout: 'STDERR\nSTDOUT'
E           Actual stdout:   'STDOUT\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           stdout_stderr.py |& cat
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::! turns non-zero into zero[L127]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109331970>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='! turns non-zero into zero', script="! $SH -c 'exit 42'; echo $?", assertions=[Assertion(type='stdout',... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=127, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ! turns non-zero into zero (line 127)
E           
E           stdout mismatch:
E             expected: '0'
E             actual:   ''
E           status mismatch: expected 0, got 42
E           
E           Expected stdout: '0'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   42
E           
E           Script:
E           ---
E           ! $SH -c 'exit 42'; echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::! turns zero into 1[L132]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109331a30>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='! turns zero into 1', script="! $SH -c 'exit 0'; echo $?", assertions=[Assertion(type='stdout', value='1', shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=132, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ! turns zero into 1 (line 132)
E           
E           stdout mismatch:
E             expected: '1'
E             actual:   ''
E           
E           Expected stdout: '1'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           ! $SH -c 'exit 0'; echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::! is not a command[L175]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109331df0>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='! is not a command', script="v='!'\n$v echo hi", assertions=[Assertion(type='status', value=127, shells=None, variant=None)], line_number=175, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ! is not a command (line 175)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: !: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           v='!'
E           $v echo hi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::Evaluation of argv[0] in pipeline occurs in child[L180]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109331eb0>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='Evaluation of argv[0] in pipeline occurs in child', script='${cmd=echo} hi | wc -l\necho "cmd=$cmd"', a...riant=None), Assertion(type='stdout', value='1\ncmd=echo', shells=['zsh'], variant='BUG')], line_number=180, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Evaluation of argv[0] in pipeline occurs in child (line 180)
E           
E           stdout mismatch:
E             expected: '1\ncmd='
E             actual:   '1\ncmd=echo'
E           
E           Expected stdout: '1\ncmd='
E           Actual stdout:   '1\ncmd=echo\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           ${cmd=echo} hi | wc -l
E           echo "cmd=$cmd"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::bash/dash/mksh run the last command is run in its own process[L192]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109331f70>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='bash/dash/mksh run the last command is run in its own process', script='echo hi | read line\necho "line...), Assertion(type='stdout', value='line=', shells=['bash', 'dash', 'mksh'], variant='OK')], line_number=192, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: bash/dash/mksh run the last command is run in its own process (line 192)
E           
E           stdout mismatch:
E             expected: 'line='
E             actual:   'line=hi'
E           
E           Expected stdout: 'line='
E           Actual stdout:   'line=hi\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hi | read line
E           echo "line=$line"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::shopt -s lastpipe (always on in OSH)[L205]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093320f0>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='shopt -s lastpipe (always on in OSH)', script='shopt -s lastpipe\ni=0\nseq 3 | while read line; do\n  (...iant=None), Assertion(type='stdout', value='i=0', shells=['dash', 'mksh'], variant='N-I')], line_number=205, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -s lastpipe (always on in OSH) (line 205)
E           
E           stdout mismatch:
E             expected: 'i=3'
E             actual:   'i=0'
E           
E           Expected stdout: 'i=3'
E           Actual stdout:   'i=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s lastpipe
E           i=0
E           seq 3 | while read line; do
E             (( i++ ))
E           done
E           echo i=$i
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::SIGPIPE causes pipeline to die (regression for issue #295)[L216]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093321b0>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='SIGPIPE causes pipeline to die (regression for issue #295)', script="cat /dev/urandom | sleep 0.1\necho..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=216, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: SIGPIPE causes pipeline to die (regression for issue #295) (line 216)
E           
E           stdout mismatch:
E             expected: '141 0'
E             actual:   '1 0'
E           
E           Expected stdout: '141 0'
E           Actual stdout:   '1 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cat /dev/urandom | sleep 0.1
E           echo ${PIPESTATUS[@]}
E           
E           # hm bash gives '1 0' which seems wrong
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[pipeline.test.sh::Pipeline in eval[L240]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109332330>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='Pipeline in eval', script="ls /dev/null | eval 'cat | cat' | wc -l", assertions=[Assertion(type='stdout', value='1', shells=None, variant=None)], line_number=240, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Pipeline in eval (line 240)
E           
E           stdout mismatch:
E             expected: '1'
E             actual:   '0'
E           
E           Expected stdout: '1'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           ls /dev/null | eval 'cat | cat' | wc -l
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[pipeline.test.sh::shopt -s lastpipe and shopt -s no_last_fork interaction[L247]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093323f0>
test_file = 'pipeline.test.sh'
test_case = TestCase(name='shopt -s lastpipe and shopt -s no_last_fork interaction', script="case $SH in dash) exit ;; esac\n\n$SH...s=None, variant=None), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=247, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -s lastpipe and shopt -s no_last_fork interaction (line 247)
E           
E           stdout mismatch:
E             expected: '0\nstatus=1\n0\nstatus=1'
E             actual:   '0\nstatus=1\nstatus=0'
E           
E           Expected stdout: '0\nstatus=1\n0\nstatus=1'
E           Actual stdout:   '0\nstatus=1\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           $SH -c '
E           shopt -s lastpipe
E           set -o errexit
E           set -o pipefail
E           
E           ls | false | wc -l'
E           echo status=$?
E           
E           # Why does this give status 0?  It should fail
E           
E           $SH -c '
E           shopt -s lastpipe
E           shopt -s no_fork_last  # OSH only
E           set -o errexit
E           set -o pipefail
E           
E           ls | false | wc -l'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[posix.test.sh::Empty for loop without in.  Do can be on the same line I guess.[L16]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109332570>
test_file = 'posix.test.sh'
test_case = TestCase(name='Empty for loop without in.  Do can be on the same line I guess.', script='set -- a b\nfor x do\n  echo ...ne', assertions=[Assertion(type='stdout', value='hi\na\nhi\nb', shells=None, variant=None)], line_number=16, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty for loop without in.  Do can be on the same line I guess. (line 16)
E           
E           stdout mismatch:
E             expected: 'hi\na\nhi\nb'
E             actual:   ''
E           
E           Expected stdout: 'hi\na\nhi\nb'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- a b
E           for x do
E             echo hi
E             echo $x
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[posix.test.sh::Empty action for case is syntax error[L57]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109332930>
test_file = 'posix.test.sh'
test_case = TestCase(name='Empty action for case is syntax error', script="# POSIX grammar seems to allow this, but bash and dash ...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=57, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty action for case is syntax error (line 57)
E           
E           Execution error: Expected pattern in case item at line 5, column 4
E           
E           
E           Script:
E           ---
E           # POSIX grammar seems to allow this, but bash and dash don't.  Need ;;
E           foo=a
E           case $foo in
E             a)
E             b)
E               echo A ;;
E             d)
E           esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[posix.test.sh::Bare semi-colon not allowed[L86]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109332b70>
test_file = 'posix.test.sh'
test_case = TestCase(name='Bare semi-colon not allowed', script="# This is disallowed by the grammar; bash and dash don't accept i...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=86, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Bare semi-colon not allowed (line 86)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is disallowed by the grammar; bash and dash don't accept it.
E           ;
E           
E           
E           
E           #
E           # Explicit tests
E           #
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[posix.test.sh::Command substitution in default[L100]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109332c30>
test_file = 'posix.test.sh'
test_case = TestCase(name='Command substitution in default', script='echo ${x:-$(ls -d /bin)}', assertions=[Assertion(type='stdout', value='/bin', shells=None, variant=None)], line_number=100, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Command substitution in default (line 100)
E           
E           Execution error: Command substitution requires async expansion
E           
E           
E           Script:
E           ---
E           echo ${x:-$(ls -d /bin)}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[quote.test.sh::Backslash escapes inside double quoted string[L75]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093339b0>
test_file = 'quote.test.sh'
test_case = TestCase(name='Backslash escapes inside double quoted string', script='echo "\\$ \\\\ \\\\ \\p \\q"', assertions=[Assertion(type='stdout', value='$ \\ \\ \\p \\q', shells=None, variant=None)], line_number=75, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Backslash escapes inside double quoted string (line 75)
E           
E           stdout mismatch:
E             expected: '$ \\ \\ \\p \\q'
E             actual:   '\\$ \\ \\ \\p \\q'
E           
E           Expected stdout: '$ \\ \\ \\p \\q'
E           Actual stdout:   '\\$ \\ \\ \\p \\q\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "\$ \\ \\ \p \q"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[quote.test.sh::Unterminated double quote[L124]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109333fb0>
test_file = 'quote.test.sh'
test_case = TestCase(name='Unterminated double quote', script='#\n# TODO: Might be another section?\n#', assertions=[Assertion(typ...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=124, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unterminated double quote (line 124)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           #
E           # TODO: Might be another section?
E           #
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[quote.test.sh::$'' octal escapes don't have leading 0[L182]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358470>
test_file = 'quote.test.sh'
test_case = TestCase(name="$'' octal escapes don't have leading 0", script="# echo -e syntax is echo -e \\0377\necho -n $'\\001' $...ant=None), Assertion(type='stdout', value=' $ 001 $ 377', shells=['dash'], variant='N-I')], line_number=182, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $'' octal escapes don't have leading 0 (line 182)
E           
E           stdout mismatch:
E             expected: ' 001 377'
E             actual:   ' 001     303 277'
E           
E           Expected stdout: ' 001 377'
E           Actual stdout:   ' 001     303 277\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # echo -e syntax is echo -e \0377
E           echo -n $'\001' $'\377' | od -A n -c | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[quote.test.sh::$'' octal escapes with fewer than 3 chars[L192]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358530>
test_file = 'quote.test.sh'
test_case = TestCase(name="$'' octal escapes with fewer than 3 chars", script="echo $'\\1 \\11 \\11 \\111' | od -A n -c | sed 's/ ...), Assertion(type='stdout', value=' $ 001 \\t \\t I \\n', shells=['dash'], variant='N-I')], line_number=192, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $'' octal escapes with fewer than 3 chars (line 192)
E           
E           stdout mismatch:
E             expected: ' 001 \\t \\t I \\n'
E             actual:   ' 001      \\t      \\t       I  \\n'
E           
E           Expected stdout: ' 001 \\t \\t I \\n'
E           Actual stdout:   ' 001      \\t      \\t       I  \\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $'\1 \11 \11 \111' | od -A n -c | sed 's/ \+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[quote.test.sh::$'' supports \\cA escape for Ctrl-A - mask with 0x1f[L240]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093588f0>
test_file = 'quote.test.sh'
test_case = TestCase(name="$'' supports \\cA escape for Ctrl-A - mask with 0x1f", script='# note: AT&T ksh supports this too\n\nca... variant=None), Assertion(type='stdout', value='', shells=['dash', 'ash'], variant='N-I')], line_number=240, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $'' supports \cA escape for Ctrl-A - mask with 0x1f (line 240)
E           
E           stdout mismatch:
E             expected: ' 020 031   -\n  10  19  2d\n\n 001 032\n  01  1a\n\n 001 032\n  01  1a\n\n  \\r  \\v 002\n  0d  0b  02'
E             actual:   '  5c  63  30  5c  63  39  2d\n\n  5c  63  61  5c  63  7a\n\n  5c  63  41  5c  63  5a\n\n  5c  63  2d  5c  63  2b  5c  63  22'
E           
E           Expected stdout: ' 020 031   -\n  10  19  2d\n\n 001 032\n  01  1a\n\n 001 032\n  01  1a\n\n  \\r  \\v 002\n  0d  0b  02'
E           Actual stdout:   '  5c  63  30  5c  63  39  2d\n\n  5c  63  61  5c  63  7a\n\n  5c  63  41  5c  63  5a\n\n  5c  63  2d  5c  63  2b  5c  63  22\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # note: AT&T ksh supports this too
E           
E           case $SH in dash|ash) exit ;; esac
E           
E           show_bytes() {
E             # -A n - no file offset
E             od -A n -t c -t x1
E           }
E           
E           # this isn't special
E           # mksh doesn't like it
E           #echo -n $'\c' | show_bytes
E           
E           echo -n $'\c0\c9-' | show_bytes
E           echo
E           
E           # control chars are case insensitive
E           echo -n $'\ca\cz' | show_bytes
E           echo
E           
E           echo -n $'\cA\cZ' | show_bytes
E           echo
E           
E           echo -n $'\c-\c+\c"' | show_bytes
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[quote.test.sh::\\c' is an escape, unlike bash[L282]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093589b0>
test_file = 'quote.test.sh'
test_case = TestCase(name="\\c' is an escape, unlike bash", script="# mksh and ksh agree this is an esacpe\n\ncase $SH in dash|ash...variant='BUG'), Assertion(type='stdout', value='', shells=['dash', 'ash'], variant='N-I')], line_number=282, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \c' is an escape, unlike bash (line 282)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '\\c | show_bytes'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '\\c | show_bytes'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh and ksh agree this is an esacpe
E           
E           case $SH in dash|ash) exit ;; esac
E           
E           show_bytes() {
E             # -A n - no file offset
E             od -A n -t c -t x1
E           }
E           
E           # this isn't special
E           # mksh doesn't like it
E           echo -n $'\c'' | show_bytes
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redir-order.test.sh::subshell + redirect order[L14]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358b30>
test_file = 'redir-order.test.sh'
test_case = TestCase(name='subshell + redirect order', script='echo hello > OSCFLAGS\n(echo `cat OSCFLAGS` "world") > OSCFLAGS\ncat OSCFLAGS', assertions=[Assertion(type='stdout', value='world', shells=None, variant=None)], line_number=14, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: subshell + redirect order (line 14)
E           
E           stdout mismatch:
E             expected: 'world'
E             actual:   'hello world'
E           
E           Expected stdout: 'world'
E           Actual stdout:   'hello world\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hello > OSCFLAGS
E           (echo `cat OSCFLAGS` "world") > OSCFLAGS
E           cat OSCFLAGS
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redir-order.test.sh::for word + redirect order[L24]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358bf0>
test_file = 'redir-order.test.sh'
test_case = TestCase(name='for word + redirect order', script='echo hello > OSCFLAGS\nfor x in `cat OSCFLAGS` world; do\n  echo $x... OSCFLAGS', assertions=[Assertion(type='stdout', value='world', shells=None, variant=None)], line_number=24, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: for word + redirect order (line 24)
E           
E           stdout mismatch:
E             expected: 'world'
E             actual:   'hello\nworld\nhello'
E           
E           Expected stdout: 'world'
E           Actual stdout:   'hello\nworld\nhello\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hello > OSCFLAGS
E           for x in `cat OSCFLAGS` world; do
E             echo $x
E           done > OSCFLAGS
E           cat OSCFLAGS
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redir-order.test.sh::case word + redirect order[L36]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358cb0>
test_file = 'redir-order.test.sh'
test_case = TestCase(name='case word + redirect order', script='echo hello > OSCFLAGS\ncase `cat OSCFLAGS` in\n  hello)\n    echo ... OSCFLAGS', assertions=[Assertion(type='stdout', value='other', shells=None, variant=None)], line_number=36, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: case word + redirect order (line 36)
E           
E           stdout mismatch:
E             expected: 'other'
E             actual:   'hello\nhello'
E           
E           Expected stdout: 'other'
E           Actual stdout:   'hello\nhello\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hello > OSCFLAGS
E           case `cat OSCFLAGS` in
E             hello)
E               echo hello
E               ;;
E             *)
E               echo other
E               ;;
E           esac > OSCFLAGS
E           cat OSCFLAGS
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redir-order.test.sh::[[ + redirect order[L53]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358d70>
test_file = 'redir-order.test.sh'
test_case = TestCase(name='[[ + redirect order', script="case $SH in dash|ash) exit ;; esac\n\necho hello > OSCFLAGS\n\n[[ `cat OS..., variant=None), Assertion(type='stdout', value='', shells=['dash', 'ash'], variant='N-I')], line_number=53, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [[ + redirect order (line 53)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0'
E             actual:   'status=0\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=0'
E           Actual stdout:   'status=0\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|ash) exit ;; esac
E           
E           echo hello > OSCFLAGS
E           
E           [[ `cat OSCFLAGS` = hello ]] > OSCFLAGS
E           echo status=$?
E           
E           # it is the empty string!
E           [[ `cat OSCFLAGS` = '' ]] > OSCFLAGS
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::>$file touches a file[L8]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358e30>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='>$file touches a file', script='rm -f myfile\ntest -f myfile\necho status=$?\n\n>myfile\ntest -f myfile...['zsh'], variant='BUG'), Assertion(type='stderr-json', value='', shells=None, variant=None)], line_number=8, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: >$file touches a file (line 8)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: ''
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f myfile
E           test -f myfile
E           echo status=$?
E           
E           >myfile
E           test -f myfile
E           echo status=$?
E           
E           
E           
E           # regression for OSH
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::$(< $file) yields the contents of the file[L29]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358ef0>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='$(< $file) yields the contents of the file', script='seq 2 3 > myfile\nfoo=$(< myfile)\nargv.py "$foo"'...ne), Assertion(type='stdout', value="['']", shells=['dash', 'ash', 'yash'], variant='N-I')], line_number=29, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $(< $file) yields the contents of the file (line 29)
E           
E           stdout mismatch:
E             expected: "['2\\n3']"
E             actual:   "['']"
E           
E           Expected stdout: "['2\\n3']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           seq 2 3 > myfile
E           foo=$(< myfile)
E           argv.py "$foo"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::`< $file` behaves like $(< file)[L43]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109358fb0>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='`< $file` behaves like $(< file)', script='seq 7 8 > myfile\n\nx=`< myfile`\n\necho "[$x]"', assertions...None), Assertion(type='stdout', value='[]', shells=['dash', 'ash', 'yash'], variant='N-I')], line_number=43, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: `< $file` behaves like $(< file) (line 43)
E           
E           stdout mismatch:
E             expected: '[7\n8]'
E             actual:   '[]'
E           
E           Expected stdout: '[7\n8]'
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           seq 7 8 > myfile
E           
E           x=`< myfile`
E           
E           echo "[$x]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::Redirect in command sub[L138]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109359370>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='Redirect in command sub', script='FOO=$(echo foo 1>&2)\necho $FOO', assertions=[Assertion(type='stdout'...lls=None, variant=None), Assertion(type='stderr', value='foo', shells=None, variant=None)], line_number=138, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect in command sub (line 138)
E           
E           stderr mismatch:
E             expected: 'foo'
E             actual:   ''
E           
E           Expected stdout: ''
E           Actual stdout:   '\n'
E           Expected stderr: 'foo'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           FOO=$(echo foo 1>&2)
E           echo $FOO
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::Redirect in function body is evaluated multiple times[L184]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109359670>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='Redirect in function body is evaluated multiple times', script='i=0\nfun() { echo "file $i"; } 1> "$TMP...dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=184, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect in function body is evaluated multiple times (line 184)
E           
E           stdout mismatch:
E             expected: 'i=2\n__\nfile 1\n__\nfile 2'
E             actual:   'i=2\n__\nfile 0\n__\nfile 1'
E           
E           Expected stdout: 'i=2\n__\nfile 1\n__\nfile 2'
E           Actual stdout:   'i=2\n__\nfile 0\n__\nfile 1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=0
E           fun() { echo "file $i"; } 1> "$TMP/file$((i++))"
E           fun
E           fun
E           echo i=$i
E           echo __
E           cat $TMP/file0
E           echo __
E           cat $TMP/file1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::Redirect in function body AND function call[L204]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109359730>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='Redirect in function body AND function call', script='fun() { echo hi; } 1>&2\nfun 2>&1', assertions=[A...shells=None, variant=None), Assertion(type='stderr', value='', shells=None, variant=None)], line_number=204, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect in function body AND function call (line 204)
E           
E           stdout mismatch:
E             expected: 'hi'
E             actual:   ''
E           stderr mismatch:
E             expected: ''
E             actual:   'hi'
E           
E           Expected stdout: 'hi'
E           Actual stdout:   ''
E           Expected stderr: ''
E           Actual stderr:   'hi\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           fun() { echo hi; } 1>&2
E           fun 2>&1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::redirect bash extensions:   [[  ((  for (([L213]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093597f0>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='redirect bash extensions:   [[  ((  for ((', script='case $SH in dash|mksh) exit ;; esac\n\nrm -f dbrac...variant=None), Assertion(type='stdout', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=213, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: redirect bash extensions:   [[  ((  for (( (line 213)
E           
E           stdout mismatch:
E             expected: '  0 dbracket\n  0 dparen\n  1 for-expr\n  1 total'
E             actual:   'for-expr\n      0 total'
E           
E           Expected stdout: '  0 dbracket\n  0 dparen\n  1 for-expr\n  1 total'
E           Actual stdout:   'for-expr\n      0 total\n'
E           Expected stderr: None
E           Actual stderr:   'wc: dbracket: No such file or directory\nwc: dparen: No such file or directory\nwc: for-expr: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           rm -f dbracket dparen for-expr
E           
E           [[ x = x ]] > dbracket
E           
E           (( 42 )) > dparen
E           
E           for ((x = 0; x < 1; ++x)); do
E             echo for-expr
E           done > for-expr
E           
E           wc -l dbracket dparen for-expr
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::Prefix redirect for loop -- not allowed[L291]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109359c70>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='Prefix redirect for loop -- not allowed', script='>$TMP/redirect2.txt for i in $(seq 3)\ndo\n  echo $i\... variant='BUG'), Assertion(type='stdout', value='1\n2\n3', shells=['zsh'], variant='BUG')], line_number=291, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Prefix redirect for loop -- not allowed (line 291)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 2, column 1
E           
E           
E           Script:
E           ---
E           >$TMP/redirect2.txt for i in $(seq 3)
E           do
E             echo $i
E           done
E           cat $TMP/redirect2.txt
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-command.test.sh::Nested function stdout redirect[L322]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109359eb0>
test_file = 'redirect-command.test.sh'
test_case = TestCase(name='Nested function stdout redirect', script='# Shows that a stack is necessary.\ninner() {\n  echo i1\n  e...sertions=[Assertion(type='stdout', value='i1\ni2\n--\no1\no2', shells=None, variant=None)], line_number=322, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Nested function stdout redirect (line 322)
E           
E           stdout mismatch:
E             expected: 'i1\ni2\n--\no1\no2'
E             actual:   'o1\ni1\ni2\no2\n--'
E           
E           Expected stdout: 'i1\ni2\n--\no1\no2'
E           Actual stdout:   'o1\ni1\ni2\no2\n--\n'
E           Expected stderr: None
E           Actual stderr:   'cat: /tmp/inner.txt: No such file or directory\ncat: /tmp/outer.txt: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # Shows that a stack is necessary.
E           inner() {
E             echo i1
E             echo i2
E           }
E           outer() {
E             echo o1
E             inner > $TMP/inner.txt
E             echo o2
E           }
E           outer > $TMP/outer.txt
E           cat $TMP/inner.txt
E           echo --
E           cat $TMP/outer.txt
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::File redirects with glob args (bash and zsh only)[L3]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109359f70>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name='File redirects with glob args (bash and zsh only)', script='touch one-bar\n\necho hi > one-*\n\ncat one...), Assertion(type='stdout', value='escaped', shells=['dash', 'mksh', 'ash'], variant='N-I')], line_number=3, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File redirects with glob args (bash and zsh only) (line 3)
E           
E           stdout mismatch:
E             expected: 'hi\nescaped'
E             actual:   'escaped'
E           
E           Expected stdout: 'hi\nescaped'
E           Actual stdout:   'escaped\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch one-bar
E           
E           echo hi > one-*
E           
E           cat one-bar
E           
E           echo escaped > one-\*
E           
E           cat one-\*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::File redirect without matching any file, with failglob[L58]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a1b0>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name='File redirect without matching any file, with failglob', script='shopt -s failglob\n\necho hi > zz-*-xx...s=['zsh'], variant='OK'), Assertion(type='stdout', value='', shells=['zsh'], variant='OK')], line_number=58, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File redirect without matching any file, with failglob (line 58)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nzz-*-xx\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nzz-*-xx\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s failglob
E           
E           echo hi > zz-*-xx
E           echo status=$?
E           
E           echo zz*
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::Redirect to $empty (in function body)[L82]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a270>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name='Redirect to $empty (in function body)', script="empty=''\nfun() { echo hi; } > $empty\nfun\necho status..., variant=None), Assertion(type='stdout', value='status=2', shells=['dash'], variant='OK')], line_number=82, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect to $empty (in function body) (line 82)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           empty=''
E           fun() { echo hi; } > $empty
E           fun
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::Redirect to ''[L94]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a330>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name="Redirect to ''", script="echo hi > ''\necho status=$?", assertions=[Assertion(type='stdout', value='status=1', shells=None, variant=None)], line_number=94, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect to '' (line 94)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hi > ''
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::File redirect to $var with glob char[L103]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a3f0>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name='File redirect to $var with glob char', script="touch two-bar\n\nstar='*'\n\n# This gets glob-expanded, ...us=0\n==> two-bar <==\n\n==> two-* <==\nhi', shells=['mksh', 'zsh', 'ash'], variant='OK')], line_number=103, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File redirect to $var with glob char (line 103)
E           
E           stdout mismatch:
E             expected: 'status=0\n==> two-bar <==\nhi'
E             actual:   'status=0\n==> two-bar <==\n\n==> two-* <==\nhi\n\n==> two-bar <=='
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: 'status=0\n==> two-bar <==\nhi'
E           Actual stdout:   'status=0\n==> two-bar <==\n\n==> two-* <==\nhi\n\n==> two-bar <==\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           touch two-bar
E           
E           star='*'
E           
E           # This gets glob-expanded, as it does outside redirects
E           echo hi > two-$star
E           echo status=$?
E           
E           head two-bar two-\*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::File redirect that globs to more than one file (bash and zsh only)[L132]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a4b0>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name='File redirect that globs to more than one file (bash and zsh only)', script='touch foo-bar\ntouch foo-s...e='status=0\n==> foo-bar <==\nhi\n\n==> foo-spam <==\nhi', shells=['zsh'], variant='BUG')], line_number=132, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File redirect that globs to more than one file (bash and zsh only) (line 132)
E           
E           stdout mismatch:
E             expected: 'status=1\n==> foo-bar <==\n\n==> foo-spam <=='
E             actual:   'status=0\n==> foo-bar <==\n\n==> foo-spam <=='
E           
E           Expected stdout: 'status=1\n==> foo-bar <==\n\n==> foo-spam <=='
E           Actual stdout:   'status=0\n==> foo-bar <==\n\n==> foo-spam <==\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch foo-bar
E           touch foo-spam
E           
E           echo hi > foo-*
E           echo status=$?
E           
E           head foo-bar foo-spam
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::File redirect with extended glob[L165]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a570>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name='File redirect with extended glob', script='shopt -s extglob\n\ntouch foo-bar\n\necho hi > @(*-bar|other...ariant='BUG'), Assertion(type='stdout', value='status=0', shells=['mksh'], variant='BUG')], line_number=165, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File redirect with extended glob (line 165)
E           
E           stdout mismatch:
E             expected: 'status=0\nhi'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=0\nhi'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           
E           touch foo-bar
E           
E           echo hi > @(*-bar|other)
E           echo status=$?
E           
E           cat foo-bar
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::Extended glob that doesn't match anything[L193]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a630>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name="Extended glob that doesn't match anything", script="shopt -s extglob\nrm bad_*\n\n# They actually write...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='N-I')], line_number=193, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Extended glob that doesn't match anything (line 193)
E           
E           stdout mismatch:
E             expected: 'status=0\nbad_@(*.cc|*.h)\nstatus=1'
E             actual:   'status=0\nbad_@(*.cc|*.h)\nstatus=0'
E           
E           Expected stdout: 'status=0\nbad_@(*.cc|*.h)\nstatus=1'
E           Actual stdout:   'status=0\nbad_@(*.cc|*.h)\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   "rm: cannot remove 'bad_*': No such file or directory\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           rm bad_*
E           
E           # They actually write this literal file!  This is what EvalWordToString() does,
E           # as opposed to _EvalWordToParts.
E           echo foo > bad_@(*.cc|*.h)
E           echo status=$?
E           
E           echo bad_*
E           
E           shopt -s failglob
E           
E           # Note: ysh:ugprade doesn't allow extended globs
E           # shopt -s ysh:upgrade
E           
E           echo foo > bad_@(*.cc|*.h)
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::Non-file redirects don't respect glob args (we differe from bash)[L230]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a6f0>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name="Non-file redirects don't respect glob args (we differe from bash)", script='touch 10\n\nexec 10>&1  # o...variant='N-I'), Assertion(type='stdout', value='', shells=['dash', 'zsh'], variant='N-I')], line_number=230, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Non-file redirects don't respect glob args (we differe from bash) (line 230)
E           
E           stdout mismatch:
E             expected: 'should-not-be-on-stdout\nstdout'
E             actual:   'stdout'
E           
E           Expected stdout: 'should-not-be-on-stdout\nstdout'
E           Actual stdout:   'stdout\n'
E           Expected stderr: None
E           Actual stderr:   'stderr\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           touch 10
E           
E           exec 10>&1  # open stdout as descriptor 10
E           
E           # Does this go to stdout?  ONLY bash respects it, not zsh
E           echo should-not-be-on-stdout >& 1*
E           
E           echo stdout
E           echo stderr >&2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::Redirect with brace expansion isn't allowed[L258]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a7b0>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name="Redirect with brace expansion isn't allowed", script='echo hi > a-{one,two}\necho status=$?\n\nhead a-*...atus=0\n==> a-one <==\nhi\n\n==> a-two <==\nhi\nstatus=0', shells=['zsh'], variant='BUG')], line_number=258, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect with brace expansion isn't allowed (line 258)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nhi\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nhi\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hi > a-{one,two}
E           echo status=$?
E           
E           head a-*
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect-multi.test.sh::File redirects have word splitting too![L289]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a870>
test_file = 'redirect-multi.test.sh'
test_case = TestCase(name='File redirects have word splitting too!', script='file=\'foo bar\'\n\necho hi > $file\necho status=$?\n...pe='stdout', value='status=0\nhi\nstatus=0', shells=['mksh', 'zsh', 'ash'], variant='OK')], line_number=289, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: File redirects have word splitting too! (line 289)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nhi\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nhi\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           file='foo bar'
E           
E           echo hi > $file
E           echo status=$?
E           
E           cat "$file"
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::>& and <& are the same[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a930>
test_file = 'redirect.test.sh'
test_case = TestCase(name='>& and <& are the same', script='echo one 1>&2\n\necho two 1<&2', assertions=[Assertion(type='stderr', value='one\ntwo', shells=None, variant=None)], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: >& and <& are the same (line 4)
E           
E           stderr mismatch:
E             expected: 'one\ntwo'
E             actual:   'one'
E           
E           Expected stdout: None
E           Actual stdout:   'two\n'
E           Expected stderr: 'one\ntwo'
E           Actual stderr:   'one\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo one 1>&2
E           
E           echo two 1<&2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_________ TestBashSpecTests.test_spec_case[redirect.test.sh::<&[L16]] __________

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935a9f0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='<&', script='# Is there a simpler test case for this?\necho foo51 > $TMP/lessamp.txt\n\nexec 6< $TMP/le...$line]"', assertions=[Assertion(type='stdout', value='[foo51]', shells=None, variant=None)], line_number=16, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: <& (line 16)
E           
E           stdout mismatch:
E             expected: '[foo51]'
E             actual:   '[]'
E           
E           Expected stdout: '[foo51]'
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Is there a simpler test case for this?
E           echo foo51 > $TMP/lessamp.txt
E           
E           exec 6< $TMP/lessamp.txt
E           read line <&6
E           
E           echo "[$line]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::2&>1 (is it a redirect or is it like a&>1)[L38]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935ab70>
test_file = 'redirect.test.sh'
test_case = TestCase(name='2&>1 (is it a redirect or is it like a&>1)', script='2&>1\necho status=$?', assertions=[Assertion(type=...t=None), Assertion(type='stdout', value='status=0', shells=['mksh', 'dash'], variant='OK')], line_number=38, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 2&>1 (is it a redirect or is it like a&>1) (line 38)
E           
E           stdout mismatch:
E             expected: 'status=127'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=127'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           2&>1
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::Named file descriptor[L98]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935b0b0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='Named file descriptor', script='exec {myfd}> $TMP/named-fd.txt\necho named-fd-contents >& $myfd\ncat $T...ariant='N-I'), Assertion(type='status', value=127, shells=['dash', 'mksh'], variant='N-I')], line_number=98, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Named file descriptor (line 98)
E           
E           stdout mismatch:
E             expected: 'named-fd-contents'
E             actual:   ''
E           
E           Expected stdout: 'named-fd-contents'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: {myfd}: command not found\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           exec {myfd}> $TMP/named-fd.txt
E           echo named-fd-contents >& $myfd
E           cat $TMP/named-fd.txt
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::: 9> fdleak (OSH regression)[L115]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935b230>
test_file = 'redirect.test.sh'
test_case = TestCase(name=': 9> fdleak (OSH regression)', script='true 9> "$TMP/fd.txt"\n( echo world >&9 )\ncat "$TMP/fd.txt"', assertions=[Assertion(type='stdout-json', value='', shells=None, variant=None)], line_number=115, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: : 9> fdleak (OSH regression) (line 115)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'world'
E           
E           Expected stdout: ''
E           Actual stdout:   'world\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           true 9> "$TMP/fd.txt"
E           ( echo world >&9 )
E           cat "$TMP/fd.txt"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::3>&- << EOF (OSH regression: fail to restore fds)[L141]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935b470>
test_file = 'redirect.test.sh'
test_case = TestCase(name='3>&- << EOF (OSH regression: fail to restore fds)', script='exec 3> "$TMP/fd.txt"\necho hello 3>&- << E..."', assertions=[Assertion(type='stdout', value='hello\nworld', shells=None, variant=None)], line_number=141, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 3>&- << EOF (OSH regression: fail to restore fds) (line 141)
E           
E           stdout mismatch:
E             expected: 'hello\nworld'
E             actual:   'hello'
E           
E           Expected stdout: 'hello\nworld'
E           Actual stdout:   'hello\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           exec 3> "$TMP/fd.txt"
E           echo hello 3>&- << EOF
E           EOF
E           echo world >&3
E           exec 3>&-  # close
E           cat "$TMP/fd.txt"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::Redirect to empty string[L181]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935b6b0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='Redirect to empty string', script='f=\'\'\necho s > "$f"\necho "result=$?"\nset -o errexit\necho s > "$...['dash'], variant='OK'), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=181, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect to empty string (line 181)
E           
E           stdout mismatch:
E             expected: 'result=1'
E             actual:   'result=0\nDONE'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: 'result=1'
E           Actual stdout:   'result=0\nDONE\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           f=''
E           echo s > "$f"
E           echo "result=$?"
E           set -o errexit
E           echo s > "$f"
E           echo DONE
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::Redirect to file descriptor that's not open[L193]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935b770>
test_file = 'redirect.test.sh'
test_case = TestCase(name="Redirect to file descriptor that's not open", script='# Notes:\n# - 7/2021: descriptor 7 seems to work ...lls=None, variant=None), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=193, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Redirect to file descriptor that's not open (line 193)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # Notes:
E           # - 7/2021: descriptor 7 seems to work on all CI systems.  The process state
E           #   isn't clean, but we could probably close it in OSH?
E           # - dash doesn't allow file descriptors greater than 9.  (This is a good
E           #   thing, because the bash chapter in AOSA book mentions that juggling user
E           #   vs.  system file descriptors is a huge pain.)
E           # - But somehow running in parallel under spec-runner.sh changes whether
E           #   descriptor 3 is open.  e.g. 'echo hi 1>&3'.  Possibly because of
E           #   /usr/bin/time.  The _tmp/spec/*.task.txt file gets corrupted!
E           # - Oh this is because I use time --output-file.  That opens descriptor 3.  And
E           #   then time forks the shell script.  The file descriptor table is inherited.
E           #   - You actually have to set the file descriptor to something.  What do
E           #   configure and debootstrap too?
E           
E           opened=$(ls /proc/$$/fd)
E           if echo "$opened" | egrep '^7$'; then
E             echo "FD 7 shouldn't be open"
E             echo "OPENED:"
E             echo "$opened"
E           fi
E           
E           echo hi 1>&7
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[redirect.test.sh::>| to clobber[L239]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935b9b0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='>| to clobber', script='echo XX >| $TMP/c.txt\n\nset -o noclobber\n\necho YY >  $TMP/c.txt  # not clobb...=None), Assertion(type='stdout', value='status=2\nXX\nZZ', shells=['dash'], variant='OK')], line_number=239, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: >| to clobber (line 239)
E           
E           stdout mismatch:
E             expected: 'status=1\nXX\nZZ'
E             actual:   'XX\nstatus=0\nYY\nZZ\nYY'
E           
E           Expected stdout: 'status=1\nXX\nZZ'
E           Actual stdout:   'XX\nstatus=0\nYY\nZZ\nYY\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: noclobber: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo XX >| $TMP/c.txt
E           
E           set -o noclobber
E           
E           echo YY >  $TMP/c.txt  # not clobber
E           echo status=$?
E           
E           cat $TMP/c.txt
E           echo ZZ >| $TMP/c.txt
E           
E           cat $TMP/c.txt
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::1>&2- to move file descriptor[L309]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935bcb0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='1>&2- to move file descriptor', script='exec 5> "$TMP/f.txt"\necho hello5 >&5\nexec 6>&5-\necho world5 ..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=309, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 1>&2- to move file descriptor (line 309)
E           
E           stdout mismatch:
E             expected: 'hello5\nworld6'
E             actual:   'hello5\nworld5'
E           
E           Expected stdout: 'hello5\nworld6'
E           Actual stdout:   'hello5\nworld5\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           exec 5> "$TMP/f.txt"
E           echo hello5 >&5
E           exec 6>&5-
E           echo world5 >&5
E           echo world6 >&6
E           exec 6>&-
E           cat "$TMP/f.txt"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::1>&2- (Bash bug: fail to restore closed fd)[L326]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935bd70>
test_file = 'redirect.test.sh'
test_case = TestCase(name='1>&2- (Bash bug: fail to restore closed fd)', script='# 7/2021: descriptor 8 is open on Github Actions,..., variant='BUG'), Assertion(type='stdout', value='hello', shells=['bash'], variant='BUG')], line_number=326, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 1>&2- (Bash bug: fail to restore closed fd) (line 326)
E           
E           stdout mismatch:
E             expected: 'hello'
E             actual:   ''
E           
E           Expected stdout: 'hello'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # 7/2021: descriptor 8 is open on Github Actions, so use descriptor 6 instead
E           
E           # Fix for CI systems where process state isn't clean: Close descriptors 6 and 7.
E           exec 6>&- 7>&-
E           
E           opened=$(ls /proc/$$/fd)
E           if echo "$opened" | egrep '^7$'; then
E             echo "FD 7 shouldn't be open"
E             echo "OPENED:"
E             echo "$opened"
E           fi
E           if echo "$opened" | egrep '^6$'; then
E             echo "FD 6 shouldn't be open"
E             echo "OPENED:"
E             echo "$opened"
E           fi
E           
E           exec 7> "$TMP/f.txt"
E           : 6>&7 7>&-
E           echo hello >&7
E           : 6>&7-
E           echo world >&7
E           exec 7>&-
E           cat "$TMP/f.txt"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::<> for read/write[L360]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935be30>
test_file = 'redirect.test.sh'
test_case = TestCase(name='<> for read/write', script='echo first >$TMP/rw.txt\nexec 8<>$TMP/rw.txt\nread line <&8\necho line=$lin...on(type='stdout', value='line=first\nCONTENTS\nfirst\nsecond', shells=None, variant=None)], line_number=360, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: <> for read/write (line 360)
E           
E           stdout mismatch:
E             expected: 'line=first\nCONTENTS\nfirst\nsecond'
E             actual:   'line=\nCONTENTS\nfirst'
E           
E           Expected stdout: 'line=first\nCONTENTS\nfirst\nsecond'
E           Actual stdout:   'line=\nCONTENTS\nfirst\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo first >$TMP/rw.txt
E           exec 8<>$TMP/rw.txt
E           read line <&8
E           echo line=$line
E           echo second 1>&8
E           echo CONTENTS
E           cat $TMP/rw.txt
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::<> for read/write named pipes[L375]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935bef0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='<> for read/write named pipes', script='rm -f "$TMP/f.pipe"\nmkfifo "$TMP/f.pipe"\nexec 8<> "$TMP/f.pip...ns=[Assertion(type='stdout', value='line1=first line2=second', shells=None, variant=None)], line_number=375, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: <> for read/write named pipes (line 375)
E           
E           stdout mismatch:
E             expected: 'line1=first line2=second'
E             actual:   'line1= line2='
E           
E           Expected stdout: 'line1=first line2=second'
E           Actual stdout:   'line1= line2=\n'
E           Expected stderr: None
E           Actual stderr:   'bash: mkfifo: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f "$TMP/f.pipe"
E           mkfifo "$TMP/f.pipe"
E           exec 8<> "$TMP/f.pipe"
E           echo first >&8
E           echo second >&8
E           read line1 <&8
E           read line2 <&8
E           exec 8<&-
E           echo line1=$line1 line2=$line2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::&>> appends stdout and stderr[L388]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10935bfb0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='&>> appends stdout and stderr', script='# Fix for flaky tests: dash behaves non-deterministically under...dash'], variant='N-I'), Assertion(type='status', value=1, shells=['dash'], variant='N-I')], line_number=388, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: &>> appends stdout and stderr (line 388)
E           
E           stdout mismatch:
E             expected: 'ok\nok\nok'
E             actual:   'STDOUT\nok'
E           
E           Expected stdout: 'ok\nok\nok'
E           Actual stdout:   'STDOUT\nok\n'
E           Expected stderr: None
E           Actual stderr:   'STDERR\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # Fix for flaky tests: dash behaves non-deterministically under load!  It
E           # doesn't implement the behavior anyway so I don't care why.
E           case $SH in
E             *dash)
E               exit 1
E               ;;
E           esac
E           
E           echo "ok" > $TMP/f.txt
E           stdout_stderr.py &>> $TMP/f.txt
E           grep ok $TMP/f.txt >/dev/null && echo 'ok'
E           grep STDOUT $TMP/f.txt >/dev/null && echo 'ok'
E           grep STDERR $TMP/f.txt >/dev/null && echo 'ok'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::echo foo >&100 (OSH regression: does not fail with invalid fd 100)[L456]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093802f0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='echo foo >&100 (OSH regression: does not fail with invalid fd 100)', script='# oil 0.8.pre4 does not fa...lls=None, variant=None), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=456, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: echo foo >&100 (OSH regression: does not fail with invalid fd 100) (line 456)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # oil 0.8.pre4 does not fail with non-existent fd 100.
E           fd=100
E           echo foo53 >&$fd
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::echo foo >&N where N is first unused fd[L464]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093803b0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='echo foo >&N where N is first unused fd', script='# 1. prepare default fd for internal uses\nminfd=10\n...lls=None, variant=None), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=464, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: echo foo >&N where N is first unused fd (line 464)
E           
E           Execution error: while loop: too many iterations (10000)
E           
E           
E           Script:
E           ---
E           # 1. prepare default fd for internal uses
E           minfd=10
E           case ${SH##*/} in
E           (mksh) minfd=24 ;;
E           (osh) minfd=100 ;;
E           esac
E           
E           # 2. prepare first unused fd
E           fd=$minfd
E           is_fd_open() { : >&$1; }
E           while is_fd_open "$fd"; do
E             : $((fd+=1))
E           
E             # OLD: prevent infinite loop for broken oils-for-unix
E             #if test $fd -gt 1000; then
E             #  break
E             #fi
E           done
E           
E           # 3. test
E           echo foo54 >&$fd
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::exec {fd}>&- (OSH regression: fails to close fd)[L490]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109380470>
test_file = 'redirect.test.sh'
test_case = TestCase(name='exec {fd}>&- (OSH regression: fails to close fd)', script='# mksh, dash do not implement {fd} redirecti...variant='N-I'), Assertion(type='status', value=1, shells=['mksh', 'dash'], variant='N-I')], line_number=490, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: exec {fd}>&- (OSH regression: fails to close fd) (line 490)
E           
E           stdout mismatch:
E             expected: 'foo55'
E             actual:   ''
E           
E           Expected stdout: 'foo55'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: {fd}: command not found\nbash: {fd}: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh, dash do not implement {fd} redirections.
E           case $SH in mksh|dash) exit 1 ;; esac
E           # oil 0.8.pre4 fails to close fd by {fd}&-.
E           exec {fd}>file1
E           echo foo55 >&$fd
E           exec {fd}>&-
E           echo bar >&$fd
E           cat file1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::noclobber can still write to non-regular files like /dev/null[L503]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109380530>
test_file = 'redirect.test.sh'
test_case = TestCase(name='noclobber can still write to non-regular files like /dev/null', script='set -C  # noclobber\nset -e  # ...ls=['dash'], variant='OK'), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=503, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: noclobber can still write to non-regular files like /dev/null (line 503)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'a\na'
E           
E           Expected stdout: ''
E           Actual stdout:   'a\na\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -C: invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -C  # noclobber
E           set -e  # errexit (raise any redirection errors)
E           
E           # Each redirect to /dev/null should succeed
E           echo a  >  /dev/null  # trunc, write stdout
E           echo a &>  /dev/null  # trunc, write stdout and stderr
E           echo a  >> /dev/null  # append, write stdout
E           echo a &>> /dev/null  # append, write stdout and stderr
E           echo a  >| /dev/null  # ignore noclobber, trunc, write stdout
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[redirect.test.sh::Parsing of x={myvar} and related cases[L542]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093806b0>
test_file = 'redirect.test.sh'
test_case = TestCase(name='Parsing of x={myvar} and related cases', script='case $SH in dash) exit ;; esac\n\necho {myvar}>/dev/st...ash'], variant='BUG'), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=542, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Parsing of x={myvar} and related cases (line 542)
E           
E           stdout mismatch:
E             expected: '\nx={myvar}\n0\nx={myvar}\n0\nx=\n1\n+{myvar}\n1\n+{myvar}\n1\n+\n2'
E             actual:   '{myvar}\nx={myvar}\n0\nx={myvar}\n0\nx= {myvar}\n0\n+{myvar}\n0\n+{myvar}\n0\n+ {myvar}\n0'
E           
E           Expected stdout: '\nx={myvar}\n0\nx={myvar}\n0\nx=\n1\n+{myvar}\n1\n+{myvar}\n1\n+\n2'
E           Actual stdout:   '{myvar}\nx={myvar}\n0\nx={myvar}\n0\nx= {myvar}\n0\n+{myvar}\n0\n+{myvar}\n0\n+ {myvar}\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           echo {myvar}>/dev/stdout
E           # Bash chooses fds starting with 10 here, osh with 100, and there can already
E           # be some open fds, so compare further fds against this one
E           starting_fd=$myvar
E           
E           echo x={myvar}>/dev/stdout
E           echo $((myvar-starting_fd))
E           echo x={myvar} >/dev/stdout
E           echo $((myvar-starting_fd))
E           echo x= {myvar}>/dev/stdout
E           echo $((myvar-starting_fd))
E           
E           echo +{myvar}>/dev/stdout
E           echo $((myvar-starting_fd))
E           echo +{myvar} >/dev/stdout
E           echo $((myvar-starting_fd))
E           echo + {myvar}>/dev/stdout
E           echo $((myvar-starting_fd))
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[regex.test.sh::BASH_REMATCH[L39]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109380830>
test_file = 'regex.test.sh'
test_case = TestCase(name='BASH_REMATCH', script='[[ foo123 =~ ([a-z]+)([0-9]+) ]]\necho status=$?\nargv.py "${BASH_REMATCH[@]}"\n...tion(type='stdout', value="status=0\n['']\nstatus=1\n['']", shells=['zsh'], variant='N-I')], line_number=39, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: BASH_REMATCH (line 39)
E           
E           stdout mismatch:
E             expected: "status=0\n['foo123', 'foo', '123']\nstatus=1\n[]"
E             actual:   "status=0\n['foo123', 'foo', '123']\nstatus=1\n['foo123', 'foo', '123']"
E           
E           Expected stdout: "status=0\n['foo123', 'foo', '123']\nstatus=1\n[]"
E           Actual stdout:   "status=0\n['foo123', 'foo', '123']\nstatus=1\n['foo123', 'foo', '123']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           [[ foo123 =~ ([a-z]+)([0-9]+) ]]
E           echo status=$?
E           argv.py "${BASH_REMATCH[@]}"
E           
E           [[ failed =~ ([a-z]+)([0-9]+) ]]
E           echo status=$?
E           argv.py "${BASH_REMATCH[@]}"  # not cleared!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Regex quoted with single quotes[L74]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109380b30>
test_file = 'regex.test.sh'
test_case = TestCase(name='Regex quoted with single quotes', script="# bash doesn't like the quotes\n[[ 'a b' =~ '^(a b)$' ]] && e...ls=['zsh'], variant='OK'), Assertion(type='status', value=0, shells=['zsh'], variant='OK')], line_number=74, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regex quoted with single quotes (line 74)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'true'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'true\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash doesn't like the quotes
E           [[ 'a b' =~ '^(a b)$' ]] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Regex quoted with double quotes[L82]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109380bf0>
test_file = 'regex.test.sh'
test_case = TestCase(name='Regex quoted with double quotes', script='# bash doesn\'t like the quotes\n[[ \'a b\' =~ "^(a b)$" ]] &...ls=['zsh'], variant='OK'), Assertion(type='status', value=0, shells=['zsh'], variant='OK')], line_number=82, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regex quoted with double quotes (line 82)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'true'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'true\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash doesn't like the quotes
E           [[ 'a b' =~ "^(a b)$" ]] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Double quoting pat variable -- again bash doesn't like it.[L100]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109380e30>
test_file = 'regex.test.sh'
test_case = TestCase(name="Double quoting pat variable -- again bash doesn't like it.", script='pat="^(a b)$"\n[[ \'a b\' =~ "$pat...s=['zsh'], variant='OK'), Assertion(type='status', value=0, shells=['zsh'], variant='OK')], line_number=100, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Double quoting pat variable -- again bash doesn't like it. (line 100)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'true'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'true\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           pat="^(a b)$"
E           [[ 'a b' =~ "$pat" ]] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Regex with == and not =~ is parse error, different lexer mode required[L116]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109380fb0>
test_file = 'regex.test.sh'
test_case = TestCase(name='Regex with == and not =~ is parse error, different lexer mode required', script="# They both give a syn...ells=None, variant=None), Assertion(type='status', value=1, shells=['zsh'], variant='OK')], line_number=116, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regex with == and not =~ is parse error, different lexer mode required (line 116)
E           
E           Execution error: Expected ']]' to close conditional at line 2, column 18
E           
E           
E           Script:
E           ---
E           # They both give a syntax error.  This is lame.
E           [[ '^(a b)$' == ^(a\ b)$ ]] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Regex to match literal brackets [][L140]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093812b0>
test_file = 'regex.test.sh'
test_case = TestCase(name='Regex to match literal brackets []', script="# bash-completion relies on this, so we're making it match...None, variant=None), Assertion(type='stdout', value='true', shells=['zsh'], variant='OK')], line_number=140, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regex to match literal brackets [] (line 140)
E           
E           stdout mismatch:
E             expected: 'true\ntrue'
E             actual:   'true'
E           
E           Expected stdout: 'true\ntrue'
E           Actual stdout:   'true\n'
E           Expected stderr: None
E           Actual stderr:   'bash: conditional: syntax error in regular expression\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash-completion relies on this, so we're making it match bash.
E           # zsh understandably differs.
E           [[ '[]' =~ \[\] ]] && echo true
E           
E           # Another way to write this.
E           pat='\[\]'
E           [[ '[]' =~ $pat ]] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Regex to match literals . ^ $ etc.[L156]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381370>
test_file = 'regex.test.sh'
test_case = TestCase(name='Regex to match literals . ^ $ etc.', script="[[ 'x' =~ \\. ]] || echo false\n[[ '.' =~ \\. ]] && echo t...['zsh'], variant='BUG'), Assertion(type='status', value=1, shells=['zsh'], variant='BUG')], line_number=156, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regex to match literals . ^ $ etc. (line 156)
E           
E           stdout mismatch:
E             expected: 'false\ntrue\nfalse\ntrue\nfalse\ntrue\nfalse\ntrue'
E             actual:   'true\nfalse\nfalse\nfalse\ntrue'
E           
E           Expected stdout: 'false\ntrue\nfalse\ntrue\nfalse\ntrue\nfalse\ntrue'
E           Actual stdout:   'true\nfalse\nfalse\nfalse\ntrue\n'
E           Expected stderr: None
E           Actual stderr:   'bash: conditional: syntax error in regular expression\nbash: conditional: syntax error in regular expression\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           [[ 'x' =~ \. ]] || echo false
E           [[ '.' =~ \. ]] && echo true
E           
E           [[ 'xx' =~ \^\$ ]] || echo false
E           [[ '^$' =~ \^\$ ]] && echo true
E           
E           [[ 'xxx' =~ \+\*\? ]] || echo false
E           [[ '*+?' =~ \*\+\? ]] && echo true
E           
E           [[ 'xx' =~ \{\} ]] || echo false
E           [[ '{}' =~ \{\} ]] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Unquoted { is a regex parse error[L186]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381430>
test_file = 'regex.test.sh'
test_case = TestCase(name='Unquoted { is a regex parse error', script='[[ { =~ { ]] && echo true\necho status=$?', assertions=[Ass...['zsh'], variant='BUG'), Assertion(type='status', value=0, shells=['zsh'], variant='BUG')], line_number=186, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unquoted { is a regex parse error (line 186)
E           
E           Execution error: Expected conditional expression at line 1, column 4
E           
E           
E           Script:
E           ---
E           [[ { =~ { ]] && echo true
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Fatal error inside [[ =~ ]][L200]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093814f0>
test_file = 'regex.test.sh'
test_case = TestCase(name='Fatal error inside [[ =~ ]]', script='# zsh and osh are stricter than bash.  bash treats [[ like a comm...bash'], variant='BUG'), Assertion(type='status', value=0, shells=['bash'], variant='BUG')], line_number=200, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Fatal error inside [[ =~ ]] (line 200)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: division by 0\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           # zsh and osh are stricter than bash.  bash treats [[ like a command.
E           
E           [[ a =~ $(( 1 / 0 )) ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[regex.test.sh::Quoted { and +[L211]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093815b0>
test_file = 'regex.test.sh'
test_case = TestCase(name='Quoted { and +', script='[[ { =~ "{" ]] && echo \'yes {\'\n[[ + =~ "+" ]] && echo \'yes +\'\n[[ * =~ "*... )\nyes |\n---\nyes .\n---\na 0\n- 1\nb 0\nz 0\nstatus=0', shells=['zsh'], variant='N-I')], line_number=211, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quoted { and + (line 211)
E           
E           Execution error: Expected conditional expression at line 1, column 4
E           
E           
E           Script:
E           ---
E           [[ { =~ "{" ]] && echo 'yes {'
E           [[ + =~ "+" ]] && echo 'yes +'
E           [[ * =~ "*" ]] && echo 'yes *'
E           [[ ? =~ "?" ]] && echo 'yes ?'
E           [[ ^ =~ "^" ]] && echo 'yes ^'
E           [[ $ =~ "$" ]] && echo 'yes $'
E           [[ '(' =~ '(' ]] && echo 'yes ('
E           [[ ')' =~ ')' ]] && echo 'yes )'
E           [[ '|' =~ '|' ]] && echo 'yes |'
E           [[ '\' =~ '\' ]] && echo 'yes \'
E           echo ---
E           
E           [[ . =~ "." ]] && echo 'yes .'
E           [[ z =~ "." ]] || echo 'no .'
E           echo ---
E           
E           # This rule is weird but all shells agree.  I would expect that the - gets
E           # escaped?  It's an operator?  but it behaves like a-z.
E           [[ a =~ ["a-z"] ]]; echo "a $?"
E           [[ - =~ ["a-z"] ]]; echo "- $?"
E           [[ b =~ ['a-z'] ]]; echo "b $?"
E           [[ z =~ ['a-z'] ]]; echo "z $?"
E           
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_______ TestBashSpecTests.test_spec_case[regex.test.sh::Escaped {[L272]] _______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381670>
test_file = 'regex.test.sh'
test_case = TestCase(name='Escaped {', script='# from bash-completion\n[[ \'$PA\' =~ ^(\\$\\{?)([A-Za-z0-9_]*)$ ]] && argv.py "${B...['zsh'], variant='BUG'), Assertion(type='status', value=1, shells=['zsh'], variant='BUG')], line_number=272, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Escaped { (line 272)
E           
E           stdout mismatch:
E             expected: "['$PA', '$', 'PA']"
E             actual:   ''
E           
E           Expected stdout: "['$PA', '$', 'PA']"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # from bash-completion
E           [[ '$PA' =~ ^(\$\{?)([A-Za-z0-9_]*)$ ]] && argv.py "${BASH_REMATCH[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[regex.test.sh::pattern a=(1)[L328]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381a30>
test_file = 'regex.test.sh'
test_case = TestCase(name='pattern a=(1)', script='[[ a=x =~ a=(x) ]]\necho status=$?\n[[ =x =~ a=(x) ]]\necho status=$?', asserti...variant='BUG'), Assertion(type='stdout', value='status=0', shells=['zsh'], variant='BUG')], line_number=328, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pattern a=(1) (line 328)
E           
E           Execution error: Expected conditional expression at line 1, column 4
E           
E           
E           Script:
E           ---
E           [[ a=x =~ a=(x) ]]
E           echo status=$?
E           [[ =x =~ a=(x) ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Bug: Nix idiom with closing ) next to pattern[L354]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381bb0>
test_file = 'regex.test.sh'
test_case = TestCase(name='Bug: Nix idiom with closing ) next to pattern', script='if [[ ! (" ${params[*]} " =~ " -shared " || " $...o\nfi', assertions=[Assertion(type='stdout', value='one\ntwo', shells=None, variant=None)], line_number=354, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Bug: Nix idiom with closing ) next to pattern (line 354)
E           
E           Execution error: Expected RPAREN, got DBRACK_END at line 1, column 78
E           
E           
E           Script:
E           ---
E           if [[ ! (" ${params[*]} " =~ " -shared " || " ${params[*]} " =~ " -static ") ]]; then
E             echo one
E           fi
E           
E           # Reduced idiom
E           if [[ (foo =~ foo) ]]; then
E             echo two
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::unquoted (a  b) as pattern, (a  b|c)[L370]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381c70>
test_file = 'regex.test.sh'
test_case = TestCase(name='unquoted (a  b) as pattern, (a  b|c)', script="if [[ 'a  b' =~ (a  b) ]]; then\n  echo one\nfi\n\nif [[... assertions=[Assertion(type='stdout', value='one\ntwo\nthree', shells=None, variant=None)], line_number=370, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unquoted (a  b) as pattern, (a  b|c) (line 370)
E           
E           stdout mismatch:
E             expected: 'one\ntwo\nthree'
E             actual:   'three'
E           
E           Expected stdout: 'one\ntwo\nthree'
E           Actual stdout:   'three\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           if [[ 'a  b' =~ (a  b) ]]; then
E             echo one
E           fi
E           
E           if [[ 'a b' =~ (a  b) ]]; then
E             echo BAD
E           fi
E           
E           if [[ 'a b' =~ (a b|c) ]]; then
E             echo two
E           fi
E           
E           # I think spaces are only allowed within ()
E           
E           if [[ '  c' =~ (a|  c) ]]; then
E             echo three
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Multiple adjacent () groups[L396]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381d30>
test_file = 'regex.test.sh'
test_case = TestCase(name='Multiple adjacent () groups', script='if [[ \'a-b-c-d\' =~ a-(b|  >>)-c-( ;|[de])|ff|gg ]]; then\n  arg..., Assertion(type='stdout', value="['']\n['']\n['']\n['']", shells=['zsh'], variant='BUG')], line_number=396, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple adjacent () groups (line 396)
E           
E           Execution error: Expected ']]' to close conditional at line 1, column 27
E           
E           
E           Script:
E           ---
E           if [[ 'a-b-c-d' =~ a-(b|  >>)-c-( ;|[de])|ff|gg ]]; then
E             argv.py "${BASH_REMATCH[@]}"
E           fi
E           
E           if [[ ff =~ a-(b|  >>)-c-( ;|[de])|ff|gg ]]; then
E             argv.py "${BASH_REMATCH[@]}"
E           fi
E           
E           # empty group ()
E           
E           if [[ zz =~ ([a-z]+)() ]]; then
E             argv.py "${BASH_REMATCH[@]}"
E           fi
E           
E           # nested empty group
E           if [[ zz =~ ([a-z]+)(()z) ]]; then
E             argv.py "${BASH_REMATCH[@]}"
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::unquoted [a  b] as pattern, [a  b|c][L431]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381df0>
test_file = 'regex.test.sh'
test_case = TestCase(name='unquoted [a  b] as pattern, [a  b|c]', script='$SH <<\'EOF\'\n[[ a =~ [ab] ]] && echo yes\nEOF\necho "[...(type='stdout', value='yes\n[ab]=0\n[a b]=1\nyes\n[a b]=0', shells=['zsh'], variant='OK')], line_number=431, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: unquoted [a  b] as pattern, [a  b|c] (line 431)
E           
E           stdout mismatch:
E             expected: 'yes\n[ab]=0\n[a b]=2\nyes\n[a b]=0'
E             actual:   '[ab]=0\n[a b]=0\n[a b]=0'
E           
E           Expected stdout: 'yes\n[ab]=0\n[a b]=2\nyes\n[a b]=0'
E           Actual stdout:   '[ab]=0\n[a b]=0\n[a b]=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH <<'EOF'
E           [[ a =~ [ab] ]] && echo yes
E           EOF
E           echo "[ab]=$?"
E           
E           $SH <<'EOF'
E           [[ a =~ [a b] ]] && echo yes
E           EOF
E           echo "[a b]=$?"
E           
E           $SH <<'EOF'
E           [[ a =~ ([a b]) ]] && echo yes
E           EOF
E           echo "[a b]=$?"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Operator chars ; & but not |[L475]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109381f70>
test_file = 'regex.test.sh'
test_case = TestCase(name='Operator chars ; & but not |', script="# Hm semicolon is still an operator in bash\n$SH <<'EOF'\n[[ ';'...emi paren=1\n\namp=1\npipe=1\npipe=1\nangle=1\nnewline=1', shells=['zsh'], variant='BUG')], line_number=475, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Operator chars ; & but not | (line 475)
E           
E           stdout mismatch:
E             expected: 'semi=2\nsemi paren\nsemi paren=0\n\namp=2\npipe1\npipe2\npipe=0\nfour\npipe=0\nangle=2\nnewline=2'
E             actual:   'semi=0\nsemi paren=0\n\namp=0\npipe=0\npipe=0\nangle=0\nnewline=0'
E           
E           Expected stdout: 'semi=2\nsemi paren\nsemi paren=0\n\namp=2\npipe1\npipe2\npipe=0\nfour\npipe=0\nangle=2\nnewline=2'
E           Actual stdout:   'semi=0\nsemi paren=0\n\namp=0\npipe=0\npipe=0\nangle=0\nnewline=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Hm semicolon is still an operator in bash
E           $SH <<'EOF'
E           [[ ';' =~ ; ]] && echo semi
E           EOF
E           echo semi=$?
E           
E           $SH <<'EOF'
E           [[ ';' =~ (;) ]] && echo semi paren
E           EOF
E           echo semi paren=$?
E           
E           echo
E           
E           $SH <<'EOF'
E           [[ '&' =~ & ]] && echo amp
E           EOF
E           echo amp=$?
E           
E           # Oh I guess this is not a bug?  regcomp doesn't reject this trivial regex?
E           $SH <<'EOF'
E           [[ '|' =~ | ]] && echo pipe1
E           [[ 'a' =~ | ]] && echo pipe2
E           EOF
E           echo pipe=$?
E           
E           $SH <<'EOF'
E           [[ '|' =~ a| ]] && echo four
E           EOF
E           echo pipe=$?
E           
E           # This is probably special because > operator is inside foo [[ a > b ]]
E           $SH <<'EOF'
E           [[ '<>' =~ <> ]] && echo angle
E           EOF
E           echo angle=$?
E           
E           # Bug: OSH allowed this!
E           $SH <<'EOF'
E           [[ $'a\nb' =~ a
E           b ]] && echo newline
E           EOF
E           echo newline=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Quotes '' "" $'' $"" in pattern[L547]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109382030>
test_file = 'regex.test.sh'
test_case = TestCase(name='Quotes \'\' "" $\'\' $"" in pattern', script='$SH <<\'EOF\'\n[[ \'|\' =~ \'|\' ]] && echo sq\nEOF\necho...=0\ndq\ndq=0\ndollar-sq\ndollar-sq=0\ndollar-dq\ndollar-dq=0', shells=None, variant=None)], line_number=547, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Quotes '' "" $'' $"" in pattern (line 547)
E           
E           stdout mismatch:
E             expected: 'sq\nsq=0\ndq\ndq=0\ndollar-sq\ndollar-sq=0\ndollar-dq\ndollar-dq=0'
E             actual:   'sq=0\ndq=0\ndollar-sq=0\ndollar-dq=0'
E           
E           Expected stdout: 'sq\nsq=0\ndq\ndq=0\ndollar-sq\ndollar-sq=0\ndollar-dq\ndollar-dq=0'
E           Actual stdout:   'sq=0\ndq=0\ndollar-sq=0\ndollar-dq=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH <<'EOF'
E           [[ '|' =~ '|' ]] && echo sq
E           EOF
E           echo sq=$?
E           
E           $SH <<'EOF'
E           [[ '|' =~ "|" ]] && echo dq
E           EOF
E           echo dq=$?
E           
E           $SH <<'EOF'
E           [[ '|' =~ $'|' ]] && echo dollar-sq
E           EOF
E           echo dollar-sq=$?
E           
E           $SH <<'EOF'
E           [[ '|' =~ $"|" ]] && echo dollar-dq
E           EOF
E           echo dollar-dq=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[regex.test.sh::Unicode in pattern[L581]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093820f0>
test_file = 'regex.test.sh'
test_case = TestCase(name='Unicode in pattern', script="$SH <<'EOF'\n[[ μ =~ μ ]] && echo mu\nEOF\necho mu=$?", assertions=[Assertion(type='stdout', value='mu\nmu=0', shells=None, variant=None)], line_number=581, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Unicode in pattern (line 581)
E           
E           stdout mismatch:
E             expected: 'mu\nmu=0'
E             actual:   'mu=0'
E           
E           Expected stdout: 'mu\nmu=0'
E           Actual stdout:   'mu=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH <<'EOF'
E           [[ μ =~ μ ]] && echo mu
E           EOF
E           echo mu=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Parse error with 2 words[L593]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093821b0>
test_file = 'regex.test.sh'
test_case = TestCase(name='Parse error with 2 words', script='if [[ a =~ c a ]]; then\n  echo one\nfi', assertions=[Assertion(type...h'], variant='BUG'), Assertion(type='stdout', value='one', shells=['zsh'], variant='BUG')], line_number=593, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Parse error with 2 words (line 593)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           if [[ a =~ c a ]]; then
E             echo one
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[regex.test.sh::make a lisp example[L608]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109382270>
test_file = 'regex.test.sh'
test_case = TestCase(name='make a lisp example', script='str=\'(hi)\'\n[[ "${str}" =~ ^^([][{}\\(\\)^@])|^(~@)|(\\"(\\\\.|[^\\\\\\...iant=None), Assertion(type='stdout', value='status=1\nm=', shells=['zsh'], variant='BUG')], line_number=608, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: make a lisp example (line 608)
E           
E           Execution error: Expected ']]' to close conditional at line 2, column 62
E           
E           
E           Script:
E           ---
E           str='(hi)'
E           [[ "${str}" =~ ^^([][{}\(\)^@])|^(~@)|(\"(\\.|[^\\\"])*\")|^(;[^$'\n']*)|^([~\'\`])|^([^][ ~\`\'\";{}\(\)^@\,]+)|^[,]|^[[:space:]]+ ]]
E           echo status=$?
E           
E           m=${BASH_REMATCH[0]}
E           echo m=$m
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[regex.test.sh::Operators and space lose meaning inside ()[L627]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109382330>
test_file = 'regex.test.sh'
test_case = TestCase(name='Operators and space lose meaning inside ()', script="[[ '< >' =~ (< >) ]] && echo true", assertions=[As...['zsh'], variant='N-I'), Assertion(type='status', value=1, shells=['zsh'], variant='N-I')], line_number=627, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Operators and space lose meaning inside () (line 627)
E           
E           Execution error: Expected ']]' to close conditional at line 1, column 14
E           
E           
E           Script:
E           ---
E           [[ '< >' =~ (< >) ]] && echo true
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[serialize.test.sh::printf %q newline[L12]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093823f0>
test_file = 'serialize.test.sh'
test_case = TestCase(name='printf %q newline', script='case $SH in ash) return ;; esac  # yash and ash don\'t implement this\n\nne...h'], variant='OK'), Assertion(type='stdout-json', value='', shells=['ash'], variant='N-I')], line_number=12, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %q newline (line 12)
E           
E           stdout mismatch:
E             expected: "$'one\\ntwo'\nroundtrip-ok"
E             actual:   "$'one\\ntwo'"
E           
E           Expected stdout: "$'one\\ntwo'\nroundtrip-ok"
E           Actual stdout:   "$'one\\ntwo'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac  # yash and ash don't implement this
E           
E           newline=$'one\ntwo'
E           printf '%q\n' "$newline"
E           
E           quoted="$(printf '%q\n' "$newline")"
E           restored=$(eval "echo $quoted")
E           test "$newline" = "$restored" && echo roundtrip-ok
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[serialize.test.sh::printf %q spaces[L36]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093824b0>
test_file = 'serialize.test.sh'
test_case = TestCase(name='printf %q spaces', script='case $SH in ash) return ;; esac  # yash and ash don\'t implement this\n\n# b...h'], variant='OK'), Assertion(type='stdout-json', value='', shells=['ash'], variant='N-I')], line_number=36, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %q spaces (line 36)
E           
E           stdout mismatch:
E             expected: 'one\\ two'
E             actual:   "$'one two'"
E           
E           Expected stdout: 'one\\ two'
E           Actual stdout:   "$'one two'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac  # yash and ash don't implement this
E           
E           # bash does a weird thing and uses \
E           
E           spaces='one two'
E           printf '%q\n' "$spaces"
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[serialize.test.sh::printf %q quotes[L52]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109382570>
test_file = 'serialize.test.sh'
test_case = TestCase(name='printf %q quotes', script='case $SH in ash) return ;; esac  # yash and ash don\'t implement %q\n\nquote...'], variant='BUG'), Assertion(type='stdout-json', value='', shells=['ash'], variant='N-I')], line_number=52, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %q quotes (line 52)
E           
E           stdout mismatch:
E             expected: '\\\'\\"\nroundtrip-ok'
E             actual:   '$\'\\\'"\''
E           
E           Expected stdout: '\\\'\\"\nroundtrip-ok'
E           Actual stdout:   '$\'\\\'"\'\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac  # yash and ash don't implement %q
E           
E           quotes=\'\"
E           printf '%q\n' "$quotes"
E           
E           quoted="$(printf '%q\n' "$quotes")"
E           restored=$(eval "echo $quoted")
E           test "$quotes" = "$restored" && echo roundtrip-ok
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[serialize.test.sh::printf %q unprintable[L76]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109382630>
test_file = 'serialize.test.sh'
test_case = TestCase(name='printf %q unprintable', script='case $SH in ash) return ;; esac  # yash and ash don\'t implement this\n...'], variant='BUG'), Assertion(type='stdout-json', value='', shells=['ash'], variant='N-I')], line_number=76, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %q unprintable (line 76)
E           
E           stdout mismatch:
E             expected: "$'\\377'"
E             actual:   "$'\\xff'"
E           
E           Expected stdout: "$'\\377'"
E           Actual stdout:   "$'\\xff'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac  # yash and ash don't implement this
E           
E           unprintable=$'\xff'
E           printf '%q\n' "$unprintable"
E           
E           # bash and zsh agree
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[serialize.test.sh::printf %q unicode[L94]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093826f0>
test_file = 'serialize.test.sh'
test_case = TestCase(name='printf %q unicode', script='case $SH in ash) return ;; esac  # yash and ash don\'t implement this\n\nun...h'], variant='OK'), Assertion(type='stdout-json', value='', shells=['ash'], variant='N-I')], line_number=94, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %q unicode (line 94)
E           
E           stdout mismatch:
E             expected: 'μ'
E             actual:   "$'\\xce\\xbc'"
E           
E           Expected stdout: 'μ'
E           Actual stdout:   "$'\\xce\\xbc'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac  # yash and ash don't implement this
E           
E           unicode=$'\u03bc'
E           unicode=$'\xce\xbc'  # does the same thing
E           
E           printf '%q\n' "$unicode"
E           
E           # OSH issue: we have quotes.  Isn't that OK?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[serialize.test.sh::printf %q invalid unicode[L111]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093827b0>
test_file = 'serialize.test.sh'
test_case = TestCase(name='printf %q invalid unicode', script='case $SH in ash) return ;; esac\n\n# Hm bash/mksh/zsh understand th...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['ash'], variant='N-I')], line_number=111, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: printf %q invalid unicode (line 111)
E           
E           stdout mismatch:
E             expected: "$'\\316'\n$'\\316μ'\n$'μ\\316'\n$'\\316a'\n$'a\\316'"
E             actual:   "$'\\xce'\n$'\\xce\\xce\\xbc'\n$'\\xce\\xbc\\xce'\n$'\\xcea'\n$'a\\xce'"
E           
E           Expected stdout: "$'\\316'\n$'\\316μ'\n$'μ\\316'\n$'\\316a'\n$'a\\316'"
E           Actual stdout:   "$'\\xce'\n$'\\xce\\xce\\xbc'\n$'\\xce\\xbc\\xce'\n$'\\xcea'\n$'a\\xce'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac
E           
E           # Hm bash/mksh/zsh understand these.  They are doing decoding and error
E           # recovery!  inspecting the bash source seems to confirm this.
E           unicode=$'\xce'
E           printf '%q\n' "$unicode"
E           
E           unicode=$'\xce\xce\xbc'
E           printf '%q\n' "$unicode"
E           
E           unicode=$'\xce\xbc\xce'
E           printf '%q\n' "$unicode"
E           
E           case $SH in mksh) return ;; esac  # it prints unprintable chars here!
E           
E           unicode=$'\xcea'
E           printf '%q\n' "$unicode"
E           unicode=$'a\xce'
E           printf '%q\n' "$unicode"
E           ---

tests/spec_tests/test_spec.py:218: Failed
________ TestBashSpecTests.test_spec_case[serialize.test.sh::set[L159]] ________

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109382870>
test_file = 'serialize.test.sh'
test_case = TestCase(name='set', script="case $SH in zsh) return ;; esac  # zsh doesn't make much sense\n\nzz=$'one\\ntwo'\n\nset ...'], variant='OK'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='BUG')], line_number=159, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set (line 159)
E           
E           stdout mismatch:
E             expected: "zz=$'one\\ntwo'"
E             actual:   "zz='one"
E           
E           Expected stdout: "zz=$'one\\ntwo'"
E           Actual stdout:   "zz='one\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) return ;; esac  # zsh doesn't make much sense
E           
E           zz=$'one\ntwo'
E           
E           set | grep zz
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[serialize.test.sh::declare[L174]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109382930>
test_file = 'serialize.test.sh'
test_case = TestCase(name='declare', script="case $SH in ash|zsh) return ;; esac  # zsh doesn't make much sense\n\nzz=$'one\\ntwo'...], variant='BUG'), Assertion(type='stdout-json', value='', shells=['ash'], variant='N-I')], line_number=174, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare (line 174)
E           
E           stdout mismatch:
E             expected: "zz=$'one\\ntwo'\ndeclare -- zz=$'one\\ntwo'"
E             actual:   'declare -- zz="one\ndeclare -- zz="one\ntwo"'
E           
E           Expected stdout: "zz=$'one\\ntwo'\ndeclare -- zz=$'one\\ntwo'"
E           Actual stdout:   'declare -- zz="one\ndeclare -- zz="one\ntwo"\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in ash|zsh) return ;; esac  # zsh doesn't make much sense
E           
E           zz=$'one\ntwo'
E           
E           typeset | grep zz
E           typeset -p zz
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[serialize.test.sh::${var@Q}[L193]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093829f0>
test_file = 'serialize.test.sh'
test_case = TestCase(name='${var@Q}', script='case $SH in zsh|ash) exit ;; esac\n\nzz=$\'one\\ntwo \\u03bc\'\n\n# weirdly, quoted ...iant='OK'), Assertion(type='stdout-json', value='', shells=['ash', 'zsh'], variant='N-I')], line_number=193, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${var@Q} (line 193)
E           
E           stdout mismatch:
E             expected: "$'one\\ntwo μ'\n$'one\\ntwo μ'"
E             actual:   "'one two μ'\n'one\ntwo μ'"
E           
E           Expected stdout: "$'one\\ntwo μ'\n$'one\\ntwo μ'"
E           Actual stdout:   "'one two μ'\n'one\ntwo μ'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh|ash) exit ;; esac
E           
E           zz=$'one\ntwo \u03bc'
E           
E           # weirdly, quoted and unquoted aren't different
E           echo ${zz@Q}
E           echo "${zz@Q}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[sh-func.test.sh::Return statement[L20]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109382cf0>
test_file = 'sh-func.test.sh'
test_case = TestCase(name='Return statement', script='f() {\n  echo one\n  return 42\n  echo two\n}\nf', assertions=[Assertion(typ... shells=None, variant=None), Assertion(type='status', value=42, shells=None, variant=None)], line_number=20, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Return statement (line 20)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 42
E           Actual status:   42
E           
E           Script:
E           ---
E           f() {
E             echo one
E             return 42
E             echo two
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-func.test.sh::return "" (a lot of disagreement)[L95]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383170>
test_file = 'sh-func.test.sh'
test_case = TestCase(name='return "" (a lot of disagreement)', script='f() {\n  echo f\n  return ""\n}\n\nf\necho status=$?', asse...iant='BUG'), Assertion(type='stdout', value='f\nstatus=2', shells=['bash'], variant='BUG')], line_number=95, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: return "" (a lot of disagreement) (line 95)
E           
E           stdout mismatch:
E             expected: 'f\nstatus=2'
E             actual:   'f\nstatus=1'
E           
E           Expected stdout: 'f\nstatus=2'
E           Actual stdout:   'f\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: return: : numeric argument required\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo f
E             return ""
E           }
E           
E           f
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[sh-func.test.sh::return $empty[L124]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383230>
test_file = 'sh-func.test.sh'
test_case = TestCase(name='return $empty', script='f() {\n  echo f\n  empty=\n  return $empty\n}\n\nf\necho status=$?', assertions=[Assertion(type='stdout', value='f\nstatus=0', shells=None, variant=None)], line_number=124, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: return $empty (line 124)
E           
E           stdout mismatch:
E             expected: 'f\nstatus=0'
E             actual:   'status=0'
E           
E           Expected stdout: 'f\nstatus=0'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo f
E             empty=
E             return $empty
E           }
E           
E           f
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-func.test.sh::Scope of global variable when sourced in function (Shell Functions aren't Closures)[L158]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093833b0>
test_file = 'sh-func.test.sh'
test_case = TestCase(name="Scope of global variable when sourced in function (Shell Functions aren't Closures)", script='set -u\n\...], variant='OK'), Assertion(type='stdout', value='g = global', shells=None, variant=None)], line_number=158, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Scope of global variable when sourced in function (Shell Functions aren't Closures) (line 158)
E           
E           stdout mismatch:
E             expected: 'g = global'
E             actual:   ''
E           
E           Expected stdout: 'g = global'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: L: unbound variable\n'
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           set -u
E           
E           echo >tmp.sh '
E           g="global"
E           local L="local"
E           
E           test_func() {
E             echo "g = $g"
E             echo "L = $L"
E           }
E           '
E           
E           main() {
E             # a becomes local here
E             # test_func is defined globally
E             . ./tmp.sh
E           }
E           
E           main
E           
E           # a is not defined
E           test_func
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options-bash.test.sh::SHELLOPTS is updated when options are changed[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383470>
test_file = 'sh-options-bash.test.sh'
test_case = TestCase(name='SHELLOPTS is updated when options are changed', script='echo $SHELLOPTS | grep -q xtrace\necho $?\nset ...nt=None), Assertion(type='stdout', value='1\n1\n1', shells=['dash', 'mksh'], variant='N-I')], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: SHELLOPTS is updated when options are changed (line 4)
E           
E           stdout mismatch:
E             expected: '1\n0\n1'
E             actual:   '1\n1\n1'
E           
E           Expected stdout: '1\n0\n1'
E           Actual stdout:   '1\n1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $SHELLOPTS | grep -q xtrace
E           echo $?
E           set -x
E           echo $SHELLOPTS | grep -q xtrace
E           echo $?
E           set +x
E           echo $SHELLOPTS | grep -q xtrace
E           echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options-bash.test.sh::SHELLOPTS is readonly[L24]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383530>
test_file = 'sh-options-bash.test.sh'
test_case = TestCase(name='SHELLOPTS is readonly', script='SHELLOPTS=x\necho status=$?\n\n# Setting a readonly variable in osh is ...h'], variant='OK'), Assertion(type='stdout-json', value='', shells=['bash'], variant='OK')], line_number=24, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: SHELLOPTS is readonly (line 24)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'status=0'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           SHELLOPTS=x
E           echo status=$?
E           
E           # Setting a readonly variable in osh is a hard failure.
E           # just-bash also treats readonly assignment as fatal (matches osh)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options-bash.test.sh::SHELLOPTS and BASHOPTS are non-empty[L37]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093835f0>
test_file = 'sh-options-bash.test.sh'
test_case = TestCase(name='SHELLOPTS and BASHOPTS are non-empty', script="# 2024-06 - tickled by Samuel testing Gentoo\n\nif test ...'dash'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=37, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: SHELLOPTS and BASHOPTS are non-empty (line 37)
E           
E           stdout mismatch:
E             expected: 'shellopts is set\nbashopts is set'
E             actual:   ''
E           
E           Expected stdout: 'shellopts is set\nbashopts is set'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: \n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # 2024-06 - tickled by Samuel testing Gentoo
E           
E           if test -v SHELLOPTS; then
E             echo 'shellopts is set'
E           fi
E           if test -v BASHOPTS; then
E           	echo 'bashopts is set'
E           fi
E           
E           # bash: braceexpand:hashall etc.
E           
E           echo shellopts ${SHELLOPTS:?} > /dev/null
E           echo bashopts ${BASHOPTS:?} > /dev/null
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options-bash.test.sh::SHELLOPTS reflects flags like sh -x[L61]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093836b0>
test_file = 'sh-options-bash.test.sh'
test_case = TestCase(name='SHELLOPTS reflects flags like sh -x', script="$SH -x -c 'echo $SHELLOPTS' | grep -o xtrace", assertions=[Assertion(type='stdout', value='xtrace', shells=None, variant=None)], line_number=61, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: SHELLOPTS reflects flags like sh -x (line 61)
E           
E           stdout mismatch:
E             expected: 'xtrace'
E             actual:   ''
E           
E           Expected stdout: 'xtrace'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           $SH -x -c 'echo $SHELLOPTS' | grep -o xtrace
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options-bash.test.sh::export SHELLOPTS does cross-process tracing[L70]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383770>
test_file = 'sh-options-bash.test.sh'
test_case = TestCase(name='export SHELLOPTS does cross-process tracing', script='$SH -c \'\nexport SHELLOPTS\nset -x\necho 1\n$SH ...ype='stdout', value="+ echo 1\n1\nsh -c 'echo 2'\n+ echo 2\n2", shells=None, variant=None)], line_number=70, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: export SHELLOPTS does cross-process tracing (line 70)
E           
E           stdout mismatch:
E             expected: "+ echo 1\n1\nsh -c 'echo 2'\n+ echo 2\n2"
E             actual:   '1\n2'
E           
E           Expected stdout: "+ echo 1\n1\nsh -c 'echo 2'\n+ echo 2\n2"
E           Actual stdout:   '1\n2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c '
E           export SHELLOPTS
E           set -x
E           echo 1
E           $SH -c "echo 2"
E           ' 2>&1 | sed 's/.*sh /sh /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options-bash.test.sh::export SHELLOPTS does cross-process tracing with bash[L88]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383830>
test_file = 'sh-options-bash.test.sh'
test_case = TestCase(name='export SHELLOPTS does cross-process tracing with bash', script='# calling bash\n$SH -c \'\nexport SHELL...ype='stdout', value="+ echo 1\n1\nsh -c 'echo 2'\n+ echo 2\n2", shells=None, variant=None)], line_number=88, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: export SHELLOPTS does cross-process tracing with bash (line 88)
E           
E           stdout mismatch:
E             expected: "+ echo 1\n1\nsh -c 'echo 2'\n+ echo 2\n2"
E             actual:   '1\n2'
E           
E           Expected stdout: "+ echo 1\n1\nsh -c 'echo 2'\n+ echo 2\n2"
E           Actual stdout:   '1\n2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # calling bash
E           $SH -c '
E           export SHELLOPTS
E           set -x
E           #echo SHELLOPTS=$SHELLOPTS
E           echo 1
E           bash -c "echo 2"
E           ' 2>&1 | sed 's/.*sh /sh /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options-bash.test.sh::OSH calling bash with SHELLOPTS does not change braceexpand[L108]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093838f0>
test_file = 'sh-options-bash.test.sh'
test_case = TestCase(name='OSH calling bash with SHELLOPTS does not change braceexpand', script='#echo outside=$SHELLOPTS\n\n# sed...sertion(type='stdout', value='braceexpand on\nbraceexpand on', shells=None, variant=None)], line_number=108, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: OSH calling bash with SHELLOPTS does not change braceexpand (line 108)
E           
E           stdout mismatch:
E             expected: 'braceexpand on\nbraceexpand on'
E             actual:   ''
E           
E           Expected stdout: 'braceexpand on\nbraceexpand on'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           #echo outside=$SHELLOPTS
E           
E           # sed pattern to normalize spaces
E           normalize='s/[ \t]\+/ /g'
E           
E           bash -c '
E           #echo bash=$SHELLOPTS
E           set -o | grep braceexpand | sed "$1"
E           ' unused "$normalize"
E           
E           env SHELLOPTS= bash -c '
E           #echo bash2=$SHELLOPTS
E           set -o | grep braceexpand | sed "$1"
E           ' unused "$normalize"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options-bash.test.sh::shopt -s progcomp hostcomplete are stubs (bash-completion)[L149]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383a70>
test_file = 'sh-options-bash.test.sh'
test_case = TestCase(name='shopt -s progcomp hostcomplete are stubs (bash-completion)', script='shopt -s progcomp hostcomplete\nec...sertions=[Assertion(type='stdout', value='status=0\nstatus=0', shells=None, variant=None)], line_number=149, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -s progcomp hostcomplete are stubs (bash-completion) (line 149)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=0'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: shopt: hostcomplete: invalid shell option name\nbash: shopt: hostcomplete: invalid shell option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s progcomp hostcomplete
E           echo status=$?
E           
E           shopt -u progcomp hostcomplete
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[sh-options.test.sh::$- with -c[L7]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383b30>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='$- with -c', script="# dash's behavior seems most sensible here?\n$SH -o nounset -c 'echo $-'", asserti...hells=['mksh'], variant='OK'), Assertion(type='status', value=0, shells=None, variant=None)], line_number=7, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $- with -c (line 7)
E           
E           stdout mismatch:
E             expected: 'huBc'
E             actual:   ''
E           status mismatch: expected 0, got 127
E           
E           Expected stdout: 'huBc'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: -o: No such file or directory\n'
E           Expected status: 0
E           Actual status:   127
E           
E           Script:
E           ---
E           # dash's behavior seems most sensible here?
E           $SH -o nounset -c 'echo $-'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::$- with pipefail[L16]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383bf0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='$- with pipefail', script='# Note: pipefail has no short flag in $-, we now include h (hashall), B (bra...'dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=16, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $- with pipefail (line 16)
E           
E           stdout mismatch:
E             expected: 'huBs'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'huBs'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: -: unbound variable\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           # Note: pipefail has no short flag in $-, we now include h (hashall), B (braceexpand), and s (stdin reading)
E           set -o pipefail -o nounset
E           echo $-
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::$- and more options[L27]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383cb0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='$- and more options', script='set -efuC\no=$-\n[[ $o == *e* ]]; echo yes\n[[ $o == *f* ]]; echo yes\n[[...ash'], variant='N-I'), Assertion(type='status', value=127, shells=['dash'], variant='N-I')], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $- and more options (line 27)
E           
E           stdout mismatch:
E             expected: 'yes\nyes\nyes\nyes'
E             actual:   ''
E           
E           Expected stdout: 'yes\nyes\nyes\nyes'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: set: -f: invalid option\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -efuC
E           o=$-
E           [[ $o == *e* ]]; echo yes
E           [[ $o == *f* ]]; echo yes
E           [[ $o == *u* ]]; echo yes
E           [[ $o == *C* ]]; echo yes
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::$- with interactive shell[L43]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383d70>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='$- with interactive shell', script="$SH -c 'echo $-' | grep i || echo FALSE\n$SH -i -c 'echo $-' | grep...RUE", assertions=[Assertion(type='stdout', value='FALSE\nTRUE', shells=None, variant=None)], line_number=43, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $- with interactive shell (line 43)
E           
E           stdout mismatch:
E             expected: 'FALSE\nTRUE'
E             actual:   'FALSE'
E           
E           Expected stdout: 'FALSE\nTRUE'
E           Actual stdout:   'FALSE\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           $SH -c 'echo $-' | grep i || echo FALSE
E           $SH -i -c 'echo $-' | grep -q i && echo TRUE
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::pass short options like sh -e[L51]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383e30>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='pass short options like sh -e', script="$SH -e -c 'false; echo status=$?'", assertions=[Assertion(type=..., shells=None, variant=None), Assertion(type='status', value=1, shells=None, variant=None)], line_number=51, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pass short options like sh -e (line 51)
E           
E           status mismatch: expected 1, got 127
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: -e: No such file or directory\n'
E           Expected status: 1
E           Actual status:   127
E           
E           Script:
E           ---
E           $SH -e -c 'false; echo status=$?'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::pass long options like sh -o errexit[L57]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383ef0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='pass long options like sh -o errexit', script="$SH -o errexit -c 'false; echo status=$?'", assertions=[..., shells=None, variant=None), Assertion(type='status', value=1, shells=None, variant=None)], line_number=57, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pass long options like sh -o errexit (line 57)
E           
E           status mismatch: expected 1, got 127
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: -o: No such file or directory\n'
E           Expected status: 1
E           Actual status:   127
E           
E           Script:
E           ---
E           $SH -o errexit -c 'false; echo status=$?'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::pass shopt options like sh -O nullglob[L63]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109383fb0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='pass shopt options like sh -O nullglob', script="$SH +O nullglob -c 'echo foo *.nonexistent bar'\n$SH -...'dash'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=63, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: pass shopt options like sh -O nullglob (line 63)
E           
E           stdout mismatch:
E             expected: 'foo *.nonexistent bar\nfoo bar'
E             actual:   ''
E           
E           Expected stdout: 'foo *.nonexistent bar\nfoo bar'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: +O: No such file or directory\nbash: -O: No such file or directory\n'
E           Expected status: None
E           Actual status:   127
E           
E           Script:
E           ---
E           $SH +O nullglob -c 'echo foo *.nonexistent bar'
E           $SH -O nullglob -c 'echo foo *.nonexistent bar'
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[sh-options.test.sh::set -o vi/emacs[L75]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a40b0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='set -o vi/emacs', script='set -o vi\necho $?\nset -o emacs\necho $?', assertions=[Assertion(type='stdout', value='0\n0', shells=None, variant=None)], line_number=75, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set -o vi/emacs (line 75)
E           
E           stdout mismatch:
E             expected: '0\n0'
E             actual:   '1\n1'
E           
E           Expected stdout: '0\n0'
E           Actual stdout:   '1\n1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: vi: invalid option name\nbash: set: emacs: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o vi
E           echo $?
E           set -o emacs
E           echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::vi and emacs are mutually exclusive[L85]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4170>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='vi and emacs are mutually exclusive', script="show() {\n  shopt -o -p | egrep 'emacs$|vi$'\n  echo ___\...), Assertion(type='stdout', value='___\n___\n___', shells=['dash', 'mksh'], variant='N-I')], line_number=85, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: vi and emacs are mutually exclusive (line 85)
E           
E           stdout mismatch:
E             expected: 'set +o emacs\nset +o vi\n___\nset -o emacs\nset +o vi\n___\nset +o emacs\nset -o vi\n___'
E             actual:   '___\n___\n___'
E           
E           Expected stdout: 'set +o emacs\nset +o vi\n___\nset -o emacs\nset +o vi\n___\nset +o emacs\nset -o vi\n___'
E           Actual stdout:   '___\n___\n___\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: emacs: invalid option name\nbash: set: vi: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show() {
E             shopt -o -p | egrep 'emacs$|vi$'
E             echo ___
E           };
E           show
E           
E           set -o emacs
E           show
E           
E           set -o vi
E           show
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::interactive shell starts with emacs mode on[L115]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4230>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='interactive shell starts with emacs mode on', script='case $SH in dash) exit ;; esac\ncase $SH in bash|...], variant='OK'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=115, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: interactive shell starts with emacs mode on (line 115)
E           
E           stdout mismatch:
E             expected: 'non-interactive\n1\n1\ninteractive\n0\n1'
E             actual:   'non-interactive\ninteractive'
E           
E           Expected stdout: 'non-interactive\n1\n1\ninteractive\n0\n1'
E           Actual stdout:   'non-interactive\ninteractive\n'
E           Expected stderr: None
E           Actual stderr:   'bash: --rcfile: No such file or directory\nbash: --rcfile: No such file or directory\n'
E           Expected status: None
E           Actual status:   127
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           case $SH in bash|*osh) flag='--rcfile /dev/null' ;; esac
E           
E           code='test -o emacs; echo $?; test -o vi; echo $?'
E           
E           echo non-interactive
E           $SH $flag -c "$code"
E           
E           echo interactive
E           $SH $flag -i -c "$code"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::-n for no execution (useful with --ast-output)[L164]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4470>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='-n for no execution (useful with --ast-output)', script="# NOTE: set +n doesn't work because nothing is... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=164, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -n for no execution (useful with --ast-output) (line 164)
E           
E           stdout mismatch:
E             expected: '1'
E             actual:   '1\n2\n3'
E           
E           Expected stdout: '1'
E           Actual stdout:   '1\n2\n3\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -n: invalid option\nbash: set: -n: invalid option\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: set +n doesn't work because nothing is executed!
E           echo 1
E           set -n
E           echo 2
E           set +n
E           echo 3
E           # osh doesn't work because it only checks -n in bin/oil.py?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::shopt -p -o prints 'set' options[L195]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a45f0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name="shopt -p -o prints 'set' options", script="case $SH in dash|mksh) exit ;; esac\n\nshopt -po nounset\nse...variant=None), Assertion(type='stdout', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=195, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -p -o prints 'set' options (line 195)
E           
E           stdout mismatch:
E             expected: 'set +o nounset\nset -o nounset\n--\nerrexit\nnoglob\nnounset'
E             actual:   '--'
E           
E           Expected stdout: 'set +o nounset\nset -o nounset\n--\nerrexit\nnoglob\nnounset'
E           Actual stdout:   '--\n'
E           Expected stderr: None
E           Actual stderr:   'bash: shopt: -o: invalid option\nbash: shopt: -o: invalid option\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           shopt -po nounset
E           set -o nounset
E           shopt -po nounset
E           
E           echo --
E           
E           shopt -po | egrep -o 'errexit|noglob|nounset'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::shopt -o prints 'set' options[L217]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a46b0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name="shopt -o prints 'set' options", script="case $SH in dash|mksh) exit ;; esac\n\nshopt -o | egrep -o 'err...variant=None), Assertion(type='stdout', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=217, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -o prints 'set' options (line 217)
E           
E           stdout mismatch:
E             expected: 'errexit\nnoglob\nnounset\n--'
E             actual:   '--'
E           
E           Expected stdout: 'errexit\nnoglob\nnounset\n--'
E           Actual stdout:   '--\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           shopt -o | egrep -o 'errexit|noglob|nounset'
E           echo --
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[sh-options.test.sh::noclobber off[L264]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a48f0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='noclobber off', script='set -o errexit\n\necho foo > can-clobber\necho status=$?\nset +C\n\necho foo > ...tion(type='stdout', value='status=0\nstatus=0\nstatus=0\nfoo', shells=None, variant=None)], line_number=264, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: noclobber off (line 264)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=0\nstatus=0\nfoo'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=0\nstatus=0\nstatus=0\nfoo'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -C: invalid option\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           
E           echo foo > can-clobber
E           echo status=$?
E           set +C
E           
E           echo foo > can-clobber
E           echo status=$?
E           set +o noclobber
E           
E           echo foo > can-clobber
E           echo status=$?
E           cat can-clobber
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[sh-options.test.sh::noclobber on[L286]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a49b0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='noclobber on', script='rm -f no-clobber\nset -C\n\necho foo > no-clobber\necho create=$?\n\necho overwr...e='stdout', value='create=0\noverwrite=2\nforce=0\nforce', shells=['dash'], variant='OK')], line_number=286, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: noclobber on (line 286)
E           
E           stdout mismatch:
E             expected: 'create=0\noverwrite=1\nforce=0\nforce'
E             actual:   'create=0\noverwrite=0\nforce\nforce=0\noverwrite'
E           
E           Expected stdout: 'create=0\noverwrite=1\nforce=0\nforce'
E           Actual stdout:   'create=0\noverwrite=0\nforce\nforce=0\noverwrite\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -C: invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f no-clobber
E           set -C
E           
E           echo foo > no-clobber
E           echo create=$?
E           
E           echo overwrite > no-clobber
E           echo overwrite=$?
E           
E           echo force >| no-clobber
E           echo force=$?
E           
E           cat no-clobber
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::noclobber on <>[L314]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4a70>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='noclobber on <>', script='set -C\necho foo >| $TMP/no-clobber\nexec 3<> $TMP/no-clobber\nread -n 1 <&3\...one, variant=None), Assertion(type='stdout', value='.oo', shells=['dash'], variant='N-I')], line_number=314, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: noclobber on <> (line 314)
E           
E           stdout mismatch:
E             expected: 'f.o'
E             actual:   'foo'
E           
E           Expected stdout: 'f.o'
E           Actual stdout:   'foo\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -C: invalid option\ncat: /tmp/no-clobber: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -C
E           echo foo >| $TMP/no-clobber
E           exec 3<> $TMP/no-clobber
E           read -n 1 <&3
E           echo -n . >&3
E           exec 3>&-
E           cat $TMP/no-clobber
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::noclobber on &> >[L343]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4bf0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='noclobber on &> >', script='case $SH in dash) exit ;; esac\n\nset -C\n\nrm -f $TMP/no-clobber\necho foo...s=None, variant=None), Assertion(type='stdout', value='', shells=['dash'], variant='BUG')], line_number=343, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: noclobber on &> > (line 343)
E           
E           stdout mismatch:
E             expected: 'stdout=0\nagain=1\nfoo\nboth=0\nagain=1\nbaz'
E             actual:   'stdout=0\nagain=0\nbar\nboth=0\nagain=0\nfoo'
E           
E           Expected stdout: 'stdout=0\nagain=1\nfoo\nboth=0\nagain=1\nbaz'
E           Actual stdout:   'stdout=0\nagain=0\nbar\nboth=0\nagain=0\nfoo\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -C: invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           set -C
E           
E           rm -f $TMP/no-clobber
E           echo foo > $TMP/no-clobber
E           echo stdout=$?
E           echo bar > $TMP/no-clobber
E           echo again=$?
E           cat $TMP/no-clobber
E           
E           rm -f $TMP/no-clobber
E           echo baz &> $TMP/no-clobber
E           echo both=$?
E           echo foo &> $TMP/no-clobber
E           echo again=$?
E           cat $TMP/no-clobber
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::noclobber on &>> >>[L373]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4cb0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='noclobber on &>> >>', script="case $SH in dash) echo 'flaky'; exit ;; esac\n\nset -C\n\nrm -f $TMP/no-c...e, variant=None), Assertion(type='stdout', value='flaky', shells=['dash'], variant='BUG')], line_number=373, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: noclobber on &>> >> (line 373)
E           
E           stdout mismatch:
E             expected: 'stdout=0\nagain=0\nfoo\nbar\nboth=0\nagain=0\nbaz\nfoo'
E             actual:   'stdout=0\nagain=0\nfoo\nbar\nbaz\nboth=0\nfoo\nagain=0'
E           
E           Expected stdout: 'stdout=0\nagain=0\nfoo\nbar\nboth=0\nagain=0\nbaz\nfoo'
E           Actual stdout:   'stdout=0\nagain=0\nfoo\nbar\nbaz\nboth=0\nfoo\nagain=0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: -C: invalid option\ncat: /tmp/no-clobber: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in dash) echo 'flaky'; exit ;; esac
E           
E           set -C
E           
E           rm -f $TMP/no-clobber
E           echo foo >> $TMP/no-clobber
E           echo stdout=$?
E           echo bar >> $TMP/no-clobber
E           echo again=$?
E           cat $TMP/no-clobber
E           
E           rm -f $TMP/no-clobber
E           echo baz &>> $TMP/no-clobber
E           echo both=$?
E           echo foo &>> $TMP/no-clobber
E           echo again=$?
E           cat $TMP/no-clobber
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::set without args lists variables[L406]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4d70>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='set without args lists variables', script="__GLOBAL=g\nf() {\n  local __mylocal=L\n  local __OTHERLOCAL...OTHERLOCAL='L'\n__mylocal='L'\n__var_in_parent_scope='D'", shells=['dash'], variant='OK')], line_number=406, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set without args lists variables (line 406)
E           
E           stdout mismatch:
E             expected: '__GLOBAL=mutated\n__OTHERLOCAL=L\n__mylocal=L\n__var_in_parent_scope=D'
E             actual:   "__GLOBAL='mutated'\n__OTHERLOCAL='L'\n__mylocal='L'\n__var_in_parent_scope='D'"
E           
E           Expected stdout: '__GLOBAL=mutated\n__OTHERLOCAL=L\n__mylocal=L\n__var_in_parent_scope=D'
E           Actual stdout:   "__GLOBAL='mutated'\n__OTHERLOCAL='L'\n__mylocal='L'\n__var_in_parent_scope='D'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           __GLOBAL=g
E           f() {
E             local __mylocal=L
E             local __OTHERLOCAL=L
E             __GLOBAL=mutated
E             set | grep '^__'
E           }
E           g() {
E             local __var_in_parent_scope=D
E             f
E           }
E           g
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::set without args and array variables[L439]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4e30>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='set without args and array variables', script="declare -a __array\n__array=(1 2 '3 4')\nset | grep '^__...dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=439, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set without args and array variables (line 439)
E           
E           stdout mismatch:
E             expected: '__array=([0]="1" [1]="2" [2]="3 4")'
E             actual:   "__array_0='1'\n__array_1='2'\n__array_2='3 4'\n__array__is_array='indexed'"
E           
E           Expected stdout: '__array=([0]="1" [1]="2" [2]="3 4")'
E           Actual stdout:   "__array_0='1'\n__array_1='2'\n__array_2='3 4'\n__array__is_array='indexed'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a __array
E           __array=(1 2 '3 4')
E           set | grep '^__'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::set without args and assoc array variables (not in OSH)[L460]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a4ef0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='set without args and assoc array variables (not in OSH)', script="typeset -A __assoc\n__assoc['k e y']=...['osh'], variant='N-I'), Assertion(type='status', value=1, shells=['osh'], variant='N-I')], line_number=460, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set without args and assoc array variables (not in OSH) (line 460)
E           
E           stdout mismatch:
E             expected: '__assoc=([a]="b" ["k e y"]="v a l" )'
E             actual:   "__assoc_'k e y'='v a l'\n__assoc__is_array='assoc'\n__assoc_a='b'"
E           
E           Expected stdout: '__assoc=([a]="b" ["k e y"]="v a l" )'
E           Actual stdout:   "__assoc_'k e y'='v a l'\n__assoc__is_array='assoc'\n__assoc_a='b'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -A __assoc
E           __assoc['k e y']='v a l'
E           __assoc[a]=b
E           set | grep '^__'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::shopt -p validates option names[L573]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a52b0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='shopt -p validates option names', script="shopt -p nullglob invalid failglob\necho status=$?\n# same th...on(type='stdout', value='status=127\nstatus=127', shells=['dash', 'mksh'], variant='N-I')], line_number=573, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -p validates option names (line 573)
E           
E           stdout mismatch:
E             expected: 'shopt -u nullglob\nshopt -u failglob\nstatus=1\nnullglob off\nfailglob off\nstatus=1'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'shopt -u nullglob\nshopt -u failglob\nstatus=1\nnullglob off\nfailglob off\nstatus=1'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: shopt: invalid: invalid shell option name\nbash: shopt: invalid: invalid shell option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -p nullglob invalid failglob
E           echo status=$?
E           # same thing as -p, slightly different format in bash
E           shopt nullglob invalid failglob > $TMP/out.txt
E           status=$?
E           sed --regexp-extended 's/\s+/ /' $TMP/out.txt  # make it easier to assert
E           echo status=$status
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::shopt -p -o validates option names[L598]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a5370>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='shopt -p -o validates option names', script='shopt -p -o errexit invalid nounset\necho status=$?', asse...K'), Assertion(type='stdout', value='status=127', shells=['dash', 'mksh'], variant='N-I')], line_number=598, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -p -o validates option names (line 598)
E           
E           stdout mismatch:
E             expected: 'set +o errexit\nset +o nounset\nstatus=1'
E             actual:   'status=1'
E           
E           Expected stdout: 'set +o errexit\nset +o nounset\nstatus=1'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: shopt: -o: invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -p -o errexit invalid nounset
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::stubbed out bash options[L614]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a5430>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='stubbed out bash options', script='shopt -s ignore_shopt_not_impl\nfor name in foo autocd cdable_vars c...sertion(type='stdout', value='127\n127\n127\n127', shells=['dash', 'mksh'], variant='OK')], line_number=614, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: stubbed out bash options (line 614)
E           
E           stdout mismatch:
E             expected: '1\n0\n0\n0'
E             actual:   '0\n0\n0\n0'
E           
E           Expected stdout: '1\n0\n0\n0'
E           Actual stdout:   '0\n0\n0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s ignore_shopt_not_impl
E           for name in foo autocd cdable_vars checkwinsize; do
E             shopt -s $name
E             echo $?
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[sh-options.test.sh::shopt -s nounset works in YSH, not in bash[L639]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a54f0>
test_file = 'sh-options.test.sh'
test_case = TestCase(name='shopt -s nounset works in YSH, not in bash', script="case $SH in\n  *dash|*mksh)\n    echo N-I\n    exi...iant='OK'), Assertion(type='stdout', value='N-I', shells=['dash', 'mksh'], variant='N-I')], line_number=639, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: shopt -s nounset works in YSH, not in bash (line 639)
E           
E           stdout mismatch:
E             expected: 'status=1\nnounset off'
E             actual:   'status=0\nnounset         off'
E           
E           Expected stdout: 'status=1\nnounset off'
E           Actual stdout:   'status=0\nnounset         off\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in
E             *dash|*mksh)
E               echo N-I
E               exit
E               ;;
E           esac
E           shopt -s nounset
E           echo status=$?
E           
E           # get rid of extra space in bash output
E           set -o | grep nounset | sed 's/[ \t]\+/ /g'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[shell-bugs.test.sh::./configure idiom[L17]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a58b0>
test_file = 'shell-bugs.test.sh'
test_case = TestCase(name='./configure idiom', script="set -o errexit\n\nif command time -f '%e %M' true; then\n  echo 'supports -... assertions=[Assertion(type='stdout', value='supports -f\nenv', shells=None, variant=None)], line_number=17, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ./configure idiom (line 17)
E           
E           stdout mismatch:
E             expected: 'supports -f\nenv'
E             actual:   ''
E           
E           Expected stdout: 'supports -f\nenv'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: -f: command not found\n\nreal\t0m0.000s\nuser\t0m0.000s\nsys\t0m0.000s\nbash: -f: command not found\n\nreal\t0m0.000s\nuser\t0m0.000s\nsys\t0m0.000s\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o errexit
E           
E           if command time -f '%e %M' true; then
E             echo 'supports -f'
E             # BUG: this was wrong
E             #time -f '%e %M' true
E           
E             # Need 'command time'
E             command time -f '%e %M' true
E           fi
E           
E           if env time -f '%e %M' true; then
E             echo 'env'
E             env time -f '%e %M' true
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[shell-grammar.test.sh::Invalid token[L35]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a5f70>
test_file = 'shell-grammar.test.sh'
test_case = TestCase(name='Invalid token', script=';;', assertions=[Assertion(type='status', value=2, shells=None, variant=None)], line_number=35, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Invalid token (line 35)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 1
E           
E           
E           Script:
E           ---
E           ;;
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[shell-grammar.test.sh::If with then on same line missing semicolon[L112]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a6ab0>
test_file = 'shell-grammar.test.sh'
test_case = TestCase(name='If with then on same line missing semicolon', script="# My ANTLR parsers fail to flag this.  The 'else'...se\n  echo\nfi", assertions=[Assertion(type='status', value=2, shells=None, variant=None)], line_number=112, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: If with then on same line missing semicolon (line 112)
E           
E           Execution error: Expected 'then' after condition at line 4, column 1
E           
E           
E           Script:
E           ---
E           # My ANTLR parsers fail to flag this.  The 'else' keyword should be unexpected.
E           if echo then
E             echo
E           else
E             echo
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[shell-grammar.test.sh::case item without ;; is not allowed[L154]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a6ff0>
test_file = 'shell-grammar.test.sh'
test_case = TestCase(name='case item without ;; is not allowed', script='case word_a in\n  word_a)\n  word_b)\n    echo\n    ;;\nesac', assertions=[Assertion(type='status', value=2, shells=None, variant=None)], line_number=154, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: case item without ;; is not allowed (line 154)
E           
E           Execution error: Expected pattern in case item at line 3, column 9
E           
E           
E           Script:
E           ---
E           case word_a in
E             word_a)
E             word_b)
E               echo
E               ;;
E           esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[shell-grammar.test.sh::Case all on one line without trailing ;; or ;[L186]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a73b0>
test_file = 'shell-grammar.test.sh'
test_case = TestCase(name='Case all on one line without trailing ;; or ;', script="# My ANTLR parsers don't fail here and they sho...ho word_c esac", assertions=[Assertion(type='status', value=2, shells=None, variant=None)], line_number=186, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Case all on one line without trailing ;; or ; (line 186)
E           
E           Execution error: Expected 'esac' to close case statement at line 2, column 62
E           
E           
E           Script:
E           ---
E           # My ANTLR parsers don't fail here and they should.
E           case word_a in word_b) echo word_b;; word_c) echo word_c esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[shell-grammar.test.sh::case: Using ; instead of ;;[L191]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a7470>
test_file = 'shell-grammar.test.sh'
test_case = TestCase(name='case: Using ; instead of ;;', script='case word_a in\n  word_a)\n    ;\n  word_b)\n    echo\n    ;;\nesac', assertions=[Assertion(type='status', value=2, shells=None, variant=None)], line_number=191, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: case: Using ; instead of ;; (line 191)
E           
E           Execution error: Expected pattern in case item at line 3, column 5
E           
E           
E           Script:
E           ---
E           case word_a in
E             word_a)
E               ;
E             word_b)
E               echo
E               ;;
E           esac
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[smoke.test.sh::here doc with var[L44]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093a7a70>
test_file = 'smoke.test.sh'
test_case = TestCase(name='here doc with var', script='v=one\ntac <<EOF\n$v\n"two\nEOF', assertions=[Assertion(type='stdout-json', value='"two\none\n', shells=None, variant=None)], line_number=44, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: here doc with var (line 44)
E           
E           stdout mismatch:
E             expected: '"two\none'
E             actual:   'two\none'
E           
E           Expected stdout: '"two\none\n'
E           Actual stdout:   'two\none\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           v=one
E           tac <<EOF
E           $v
E           "two
E           EOF
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[strict-options.test.sh::Sourcing a script that returns at the top level[L37]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d0470>
test_file = 'strict-options.test.sh'
test_case = TestCase(name='Sourcing a script that returns at the top level', script="# Create temp script inline - echoes its name...ssertion(type='stdout', value='one\nreturn-helper.sh\n42\ntwo', shells=None, variant=None)], line_number=37, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Sourcing a script that returns at the top level (line 37)
E           
E           Execution error: return 42
E           
E           
E           Script:
E           ---
E           # Create temp script inline - echoes its name and returns 42
E           cat > /tmp/return-helper.sh <<'SCRIPT'
E           echo return-helper.sh
E           return 42
E           SCRIPT
E           
E           echo one
E           . /tmp/return-helper.sh
E           echo $?
E           echo two
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[strict-options.test.sh::return at top level is an error[L76]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d05f0>
test_file = 'strict-options.test.sh'
test_case = TestCase(name='return at top level is an error', script='return\necho "status=$?"', assertions=[Assertion(type='stdout..., variant=None), Assertion(type='stdout', value='status=1', shells=['bash'], variant='OK')], line_number=76, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: return at top level is an error (line 76)
E           
E           Execution error: return 0
E           
E           
E           Script:
E           ---
E           return
E           echo "status=$?"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[strict-options.test.sh::empty argv WITHOUT strict_argv[L95]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d0830>
test_file = 'strict-options.test.sh'
test_case = TestCase(name='empty argv WITHOUT strict_argv', script='x=\'\'\n$x\necho status=$?\n\nif $x; then\n  echo VarSub\nfi\n...tatus=0\nVarSub\nCommandSub\nVarSub FAILED\nCommandSub FAILED', shells=None, variant=None)], line_number=95, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: empty argv WITHOUT strict_argv (line 95)
E           
E           stdout mismatch:
E             expected: 'status=0\nVarSub\nCommandSub\nVarSub FAILED\nCommandSub FAILED'
E             actual:   'status=1\nVarSub FAILED\nCommandSub FAILED'
E           
E           Expected stdout: 'status=0\nVarSub\nCommandSub\nVarSub FAILED\nCommandSub FAILED'
E           Actual stdout:   'status=1\nVarSub FAILED\nCommandSub FAILED\n'
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\nbash: : command not found\nbash: : command not found\nbash: : command not found\nbash: : command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=''
E           $x
E           echo status=$?
E           
E           if $x; then
E             echo VarSub
E           fi
E           
E           if $(echo foo >/dev/null); then
E             echo CommandSub
E           fi
E           
E           if "$x"; then
E             echo VarSub
E           else
E             echo VarSub FAILED
E           fi
E           
E           if "$(echo foo >/dev/null)"; then
E             echo CommandSub
E           else
E             echo CommandSub FAILED
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[strict-options.test.sh::automatically creating arrays are INDEXED, not associative[L185]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d0b30>
test_file = 'strict-options.test.sh'
test_case = TestCase(name='automatically creating arrays are INDEXED, not associative', script='shopt -u strict_arith || true\n\nu..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=185, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: automatically creating arrays are INDEXED, not associative (line 185)
E           
E           stdout mismatch:
E             expected: "['zzz', 'x', 'y']"
E             actual:   "['x', 'y']"
E           
E           Expected stdout: "['zzz', 'x', 'y']"
E           Actual stdout:   "['x', 'y']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -u strict_arith || true
E           
E           undef[2]=x
E           undef[3]=y
E           x='bad'
E           # bad gets coerced to zero, but this is part of the RECURSIVE arithmetic
E           # behavior, which we want to disallow.  Consider disallowing in OSH.
E           
E           undef[$x]=zzz
E           argv.py "${undef[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[temp-binding.test.sh::FOO=bar $unset - temp binding, then empty argv from unquoted unset var (#2411)[L161]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d12b0>
test_file = 'temp-binding.test.sh'
test_case = TestCase(name='FOO=bar $unset - temp binding, then empty argv from unquoted unset var (#2411)', script='foo=alive! $un...ho $foo', assertions=[Assertion(type='stdout', value='alive!', shells=None, variant=None)], line_number=161, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: FOO=bar $unset - temp binding, then empty argv from unquoted unset var (#2411) (line 161)
E           
E           stdout mismatch:
E             expected: 'alive!'
E             actual:   ''
E           
E           Expected stdout: 'alive!'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=alive! $unset
E           echo $foo
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[tilde.test.sh::~ expansion in readonly assignment[L9]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d1430>
test_file = 'tilde.test.sh'
test_case = TestCase(name='~ expansion in readonly assignment', script='# dash fails here!\n# http://stackoverflow.com/questions/8...one, variant=None), Assertion(type='stdout', value='~/src', shells=['dash'], variant='BUG')], line_number=9, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ~ expansion in readonly assignment (line 9)
E           
E           stdout mismatch:
E             expected: '/home/bob/src'
E             actual:   '~/src'
E           
E           Expected stdout: '/home/bob/src'
E           Actual stdout:   '~/src\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # dash fails here!
E           # http://stackoverflow.com/questions/8441473/tilde-expansion-doesnt-work-when-i-logged-into-gui
E           HOME=/home/bob
E           readonly const=~/src
E           echo $const
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[tilde.test.sh::No tilde expansion in word that looks like assignment but isn't[L25]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d15b0>
test_file = 'tilde.test.sh'
test_case = TestCase(name="No tilde expansion in word that looks like assignment but isn't", script='# bash and mksh mistakenly ex...ne), Assertion(type='stdout', value='x=/home/bob', shells=['bash', 'mksh'], variant='BUG')], line_number=25, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: No tilde expansion in word that looks like assignment but isn't (line 25)
E           
E           stdout mismatch:
E             expected: 'x=/home/bob'
E             actual:   'x=~'
E           
E           Expected stdout: 'x=/home/bob'
E           Actual stdout:   'x=~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash and mksh mistakenly expand here!
E           # bash fixes this in POSIX mode (gah).
E           # http://lists.gnu.org/archive/html/bug-bash/2016-06/msg00001.html
E           HOME=/home/bob
E           echo x=~
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[tilde.test.sh::${undef:-~}[L48]] _______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d17f0>
test_file = 'tilde.test.sh'
test_case = TestCase(name='${undef:-~}', script='HOME=/home/bar\necho ${undef:-~}\necho ${HOME:+~/z}\necho "${undef:-~}"\necho ${u...[Assertion(type='stdout', value='/home/bar\n/home/bar/z\n~\n~', shells=None, variant=None)], line_number=48, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${undef:-~} (line 48)
E           
E           stdout mismatch:
E             expected: '/home/bar\n/home/bar/z\n~\n~'
E             actual:   '/home/bar\n/home/bar/z\n/home/bar\n~'
E           
E           Expected stdout: '/home/bar\n/home/bar/z\n~\n~'
E           Actual stdout:   '/home/bar\n/home/bar/z\n/home/bar\n~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           HOME=/home/bar
E           echo ${undef:-~}
E           echo ${HOME:+~/z}
E           echo "${undef:-~}"
E           echo ${undef:-"~"}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[tilde.test.sh::x=foo:~ has tilde expansion[L80]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d1970>
test_file = 'tilde.test.sh'
test_case = TestCase(name='x=foo:~ has tilde expansion', script='HOME=/home/bar\nx=foo:~\necho $x\necho "$x"  # quotes don\'t matt.../home/bar\nfoo:/home/bar\nfoo:~\nfoo:~,\n/home/bar:foo\nfoo:~', shells=None, variant=None)], line_number=80, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: x=foo:~ has tilde expansion (line 80)
E           
E           stdout mismatch:
E             expected: 'foo:/home/bar\nfoo:/home/bar\nfoo:~\nfoo:~,\n/home/bar:foo\nfoo:~'
E             actual:   'foo:/home/bar\nfoo:/home/bar\nfoo:~\nfoo:/home/bar,\n/home/bar:foo\nfoo:~'
E           
E           Expected stdout: 'foo:/home/bar\nfoo:/home/bar\nfoo:~\nfoo:~,\n/home/bar:foo\nfoo:~'
E           Actual stdout:   'foo:/home/bar\nfoo:/home/bar\nfoo:~\nfoo:/home/bar,\n/home/bar:foo\nfoo:~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           HOME=/home/bar
E           x=foo:~
E           echo $x
E           echo "$x"  # quotes don't matter, the expansion happens on assignment?
E           x='foo:~'
E           echo $x
E           
E           x=foo:~,  # comma ruins it, must be /
E           echo $x
E           
E           x=~:foo
E           echo $x
E           
E           # no tilde expansion here
E           echo foo:~
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[tilde.test.sh::tilde expansion an assignment keyword[L123]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d1af0>
test_file = 'tilde.test.sh'
test_case = TestCase(name='tilde expansion an assignment keyword', script='HOME=/home/bar\nf() {\n  local x=foo:~\n  echo $x\n}\nf...e, variant=None), Assertion(type='stdout', value='foo:~', shells=['dash'], variant='BUG')], line_number=123, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: tilde expansion an assignment keyword (line 123)
E           
E           stdout mismatch:
E             expected: 'foo:/home/bar'
E             actual:   'foo:~'
E           
E           Expected stdout: 'foo:/home/bar'
E           Actual stdout:   'foo:~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           HOME=/home/bar
E           f() {
E             local x=foo:~
E             echo $x
E           }
E           f
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[tilde.test.sh::x=${undef-~:~}[L137]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d1bb0>
test_file = 'tilde.test.sh'
test_case = TestCase(name='x=${undef-~:~}', script="HOME=/home/bar\n\nx=~:${undef-~:~}\necho $x\n\n# Most shells agree on a differ...e), Assertion(type='stdout', value='/home/bar:~:~', shells=['osh', 'yash'], variant='OK')], line_number=137, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: x=${undef-~:~} (line 137)
E           
E           stdout mismatch:
E             expected: '/home/bar:/home/bar:/home/bar'
E             actual:   '/home/bar:/home/bar:~'
E           
E           Expected stdout: '/home/bar:/home/bar:/home/bar'
E           Actual stdout:   '/home/bar:/home/bar:~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           HOME=/home/bar
E           
E           x=~:${undef-~:~}
E           echo $x
E           
E           # Most shells agree on a different behavior, but with the OSH parsing model,
E           # it's easier to agree with yash.  bash disagrees in a different way
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[type-compat.test.sh::declare -i with +=[L41]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d1eb0>
test_file = 'type-compat.test.sh'
test_case = TestCase(name='declare -i with +=', script='declare s\ns=\'1 \'\ns+=\' 2 \'  # string append\n\ndeclare -i i\ni=\'1 \'..., Assertion(type='stdout', value='[1  2 ]\n[1 2 ]\n[x 2 ]', shells=['osh'], variant='N-I')], line_number=41, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare -i with += (line 41)
E           
E           stdout mismatch:
E             expected: '[1  2 ]\n[3]\n[2]'
E             actual:   '[1  2 ]\n[12]\n[02]'
E           
E           Expected stdout: '[1  2 ]\n[3]\n[2]'
E           Actual stdout:   '[1  2 ]\n[12]\n[02]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare s
E           s='1 '
E           s+=' 2 '  # string append
E           
E           declare -i i
E           i='1 '
E           i+=' 2 '  # arith add
E           
E           declare -i j
E           j=x  # treated like zero
E           j+=' 2 '  # arith add
E           
E           echo "[$s]"
E           echo [$i]
E           echo [$j]
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[type-compat.test.sh::append in arith context[L90]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2030>
test_file = 'type-compat.test.sh'
test_case = TestCase(name='append in arith context', script='declare s\n(( s=\'1 \'))\n(( s+=\' 2 \'))  # arith add\ndeclare -i i\...$s|$i|$j"', assertions=[Assertion(type='stdout', value='3|3|2', shells=None, variant=None)], line_number=90, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: append in arith context (line 90)
E           
E           stdout mismatch:
E             expected: '3|3|2'
E             actual:   '0|0|0'
E           
E           Expected stdout: '3|3|2'
E           Actual stdout:   '0|0|0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare s
E           (( s='1 '))
E           (( s+=' 2 '))  # arith add
E           declare -i i
E           (( i='1 ' ))
E           (( i+=' 2 ' ))
E           declare -i j
E           (( j='x ' ))  # treated like zero
E           (( j+=' 2 ' ))
E           echo "$s|$i|$j"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[type-compat.test.sh::declare array vs. associative array[L120]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d21b0>
test_file = 'type-compat.test.sh'
test_case = TestCase(name='declare array vs. associative array', script='# Hm I don\'t understand why the array only has one eleme...iant=None), Assertion(type='stdout', value="['0']\n['0']", shells=['osh'], variant='N-I')], line_number=120, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: declare array vs. associative array (line 120)
E           
E           stdout mismatch:
E             expected: "['1', '0', 'd']\n['2', 'a', 'c', 'b', 'd']"
E             actual:   "['0', 'a', 'c']\n['2', 'a', 'c', 'b', 'd']"
E           
E           Expected stdout: "['1', '0', 'd']\n['2', 'a', 'c', 'b', 'd']"
E           Actual stdout:   "['0', 'a', 'c']\n['2', 'a', 'c', 'b', 'd']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Hm I don't understand why the array only has one element.  I guess because
E           # index 0 is used twice?
E           declare -a 'array=([a]=b [c]=d)'
E           declare -A 'assoc=([a]=b [c]=d)'
E           argv.py "${#array[@]}" "${!array[@]}" "${array[@]}"
E           argv.py "${#assoc[@]}" "${!assoc[@]}" "${assoc[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[var-num.test.sh::$0 with stdin[L23]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2570>
test_file = 'var-num.test.sh'
test_case = TestCase(name='$0 with stdin', script="echo 'echo $0' | $SH | grep -o 'sh$'", assertions=[Assertion(type='stdout', value='sh', shells=None, variant=None)], line_number=23, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $0 with stdin (line 23)
E           
E           stdout mismatch:
E             expected: 'sh'
E             actual:   ''
E           
E           Expected stdout: 'sh'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           echo 'echo $0' | $SH | grep -o 'sh$'
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[var-num.test.sh::$0 with -i[L27]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2630>
test_file = 'var-num.test.sh'
test_case = TestCase(name='$0 with -i', script="echo 'echo $0' | $SH -i | grep -o 'sh$'", assertions=[Assertion(type='stdout', value='sh', shells=None, variant=None)], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $0 with -i (line 27)
E           
E           stdout mismatch:
E             expected: 'sh'
E             actual:   ''
E           
E           Expected stdout: 'sh'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           echo 'echo $0' | $SH -i | grep -o 'sh$'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::Case folding - multi code point[L60]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2ab0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='Case folding - multi code point', script='echo shell\nsmall=$\'\\u00DF\'\necho u ${small^}\necho U ${sm...ut', value='shell\nu ß\nU ß\nl ß\nL ß\n\npython2\nß\nß\n', shells=['bash'], variant='BUG')], line_number=60, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Case folding - multi code point (line 60)
E           
E           stdout mismatch:
E             expected: 'shell\nu ß\nU ß\nl ß\nL ß\n\npython2\nß\nß'
E             actual:   'shell\nu SS\nU SS\nl ß\nL ß\n\npython2'
E           
E           Expected stdout: 'shell\nu ß\nU ß\nl ß\nL ß\n\npython2\nß\nß\n'
E           Actual stdout:   'shell\nu SS\nU SS\nl ß\nL ß\n\npython2\n\n'
E           Expected stderr: None
E           Actual stderr:   'bash: python2: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo shell
E           small=$'\u00DF'
E           echo u ${small^}
E           echo U ${small^^}
E           
E           echo l ${small,}
E           echo L ${small,,}
E           echo
E           
E           echo python2
E           python2 -c '
E           small = u"\u00DF"
E           print(small.upper().encode("utf-8"))
E           print(small.lower().encode("utf-8"))
E           '
E           echo
E           
E           # Not in the container images, but python 3 DOES support it!
E           # This is moved to demo/survey-case-fold.sh
E           
E           if false; then
E           echo python3
E           python3 -c '
E           import sys
E           small = u"\u00DF"
E           sys.stdout.buffer.write(small.upper().encode("utf-8") + b"\n")
E           sys.stdout.buffer.write(small.lower().encode("utf-8") + b"\n")
E           '
E           fi
E           
E           if false; then
E             # Yes, supported
E             echo node.js
E           
E             nodejs -e '
E             var small = "\u00DF"
E             console.log(small.toUpperCase())
E             console.log(small.toLowerCase())
E             '
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::Lower Case with constant string (VERY WEIRD)[L142]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2c30>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='Lower Case with constant string (VERY WEIRD)', script="x='AAA ABC DEF'\necho ${x,A}\necho ${x,,A}  # re...ns=[Assertion(type='stdout', value='aAA ABC DEF\naaa aBC DEF', shells=None, variant=None)], line_number=142, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Lower Case with constant string (VERY WEIRD) (line 142)
E           
E           stdout mismatch:
E             expected: 'aAA ABC DEF\naaa aBC DEF'
E             actual:   'aAA ABC DEF\naaa abc def'
E           
E           Expected stdout: 'aAA ABC DEF\naaa aBC DEF'
E           Actual stdout:   'aAA ABC DEF\naaa abc def\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='AAA ABC DEF'
E           echo ${x,A}
E           echo ${x,,A}  # replaces every A only?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::Lower Case glob[L151]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2cf0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='Lower Case glob', script="# Hm with C.UTF-8, this does no case folding?\nexport LC_ALL=en_US.UTF-8\n\nx...assertions=[Assertion(type='stdout', value='ABC DEF\nABC DEF', shells=None, variant=None)], line_number=151, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Lower Case glob (line 151)
E           
E           stdout mismatch:
E             expected: 'ABC DEF\nABC DEF'
E             actual:   'aBC DEF\nabc def'
E           
E           Expected stdout: 'ABC DEF\nABC DEF'
E           Actual stdout:   'aBC DEF\nabc def\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Hm with C.UTF-8, this does no case folding?
E           export LC_ALL=en_US.UTF-8
E           
E           x='ABC DEF'
E           echo ${x,[d-f]}
E           echo ${x,,[d-f]}  # bash 4.4 fixed in bash 5.2.21
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::${x@u} U L - upper / lower case (bash 5.1 feature)[L164]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2db0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='${x@u} U L - upper / lower case (bash 5.1 feature)', script='# https://www.gnu.org/software/bash/manual...s=[Assertion(type='stdout', value='Abc DEF\nABC DEF\nabc def', shells=None, variant=None)], line_number=164, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${x@u} U L - upper / lower case (bash 5.1 feature) (line 164)
E           
E           stdout mismatch:
E             expected: 'Abc DEF\nABC DEF\nabc def'
E             actual:   ''
E           
E           Expected stdout: 'Abc DEF\nABC DEF\nabc def'
E           Actual stdout:   '\n\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
E           
E           x='abc DEF'
E           
E           echo "${x@u}"
E           
E           echo "${x@U}"
E           
E           echo "${x@L}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::${array@Q} and ${array[@]@Q}[L190]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2f30>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='${array@Q} and ${array[@]@Q}', script="array=(x 'y\\nz')\necho ${array[@]@Q}\necho ${array@Q}\necho ${a...None), Assertion(type='stdout', value="x $'y\\\\nz'\nx\nx", shells=['osh'], variant='OK')], line_number=190, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${array@Q} and ${array[@]@Q} (line 190)
E           
E           stdout mismatch:
E             expected: "'x' 'y\\nz'\n'x'\n'x'"
E             actual:   "'x y\\nz'\n'x'\n'x'"
E           
E           Expected stdout: "'x' 'y\\nz'\n'x'\n'x'"
E           Actual stdout:   "'x y\\nz'\n'x'\n'x'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           array=(x 'y\nz')
E           echo ${array[@]@Q}
E           echo ${array@Q}
E           echo ${array@Q}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::${!prefix@} ${!prefix*} yields sorted array of var names[L206]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d2ff0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='${!prefix@} ${!prefix*} yields sorted array of var names', script='ZOO=zoo\nZIP=zip\nZOOM=\'one two\'\n...']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']", shells=None, variant=None)], line_number=206, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!prefix@} ${!prefix*} yields sorted array of var names (line 206)
E           
E           stdout mismatch:
E             expected: "['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']"
E             actual:   "['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']"
E           
E           Expected stdout: "['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']"
E           Actual stdout:   "['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z', 'ZIP', 'ZOO', 'ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n['Z ZIP ZOO ZOOM']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           ZOO=zoo
E           ZIP=zip
E           ZOOM='one two'
E           Z='three four'
E           
E           z=lower
E           
E           argv.py ${!Z*}
E           argv.py ${!Z@}
E           argv.py "${!Z*}"
E           argv.py "${!Z@}"
E           for i in 1 2; do argv.py ${!Z*}  ; done
E           for i in 1 2; do argv.py ${!Z@}  ; done
E           for i in 1 2; do argv.py "${!Z*}"; done
E           for i in 1 2; do argv.py "${!Z@}"; done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::${var@a} for attributes[L247]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d3170>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='${var@a} for attributes', script='array=(one two)\necho ${array@a}\ndeclare -r array=(one two)\necho ${...x@a}', assertions=[Assertion(type='stdout', value='a\nar\nrx', shells=None, variant=None)], line_number=247, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${var@a} for attributes (line 247)
E           
E           stdout mismatch:
E             expected: 'a\nar\nrx'
E             actual:   'a\nar\nr'
E           
E           Expected stdout: 'a\nar\nrx'
E           Actual stdout:   'a\nar\nr\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           array=(one two)
E           echo ${array@a}
E           declare -r array=(one two)
E           echo ${array@a}
E           declare -rx PYTHONPATH=hi
E           echo ${PYTHONPATH@a}
E           
E           # bash and osh differ here
E           #declare -rxn x=z
E           #echo ${x@a}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::undef and @P @Q @a[L270]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d32f0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='undef and @P @Q @a', script="$SH -c 'echo ${undef@P}'\necho status=$?\n$SH -c 'echo ${undef@Q}'\necho s...ion(type='stdout', value='\nstatus=0\n\nstatus=0\n\nstatus=0', shells=None, variant=None)], line_number=270, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: undef and @P @Q @a (line 270)
E           
E           stdout mismatch:
E             expected: '\nstatus=0\n\nstatus=0\n\nstatus=0'
E             actual:   "\nstatus=0\n''\nstatus=0\n\nstatus=0"
E           
E           Expected stdout: '\nstatus=0\n\nstatus=0\n\nstatus=0'
E           Actual stdout:   "\nstatus=0\n''\nstatus=0\n\nstatus=0\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c 'echo ${undef@P}'
E           echo status=$?
E           $SH -c 'echo ${undef@Q}'
E           echo status=$?
E           $SH -c 'echo ${undef@a}'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::argv array and @P @Q @a[L287]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d33b0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='argv array and @P @Q @a', script="$SH -c 'echo ${@@P}' dummy a b c\necho status=$?\n$SH -c 'echo ${@@Q}...', value="a b c\nstatus=0\n'a' 'b\\nc'\nstatus=0\n\nstatus=0", shells=None, variant=None)], line_number=287, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: argv array and @P @Q @a (line 287)
E           
E           stdout mismatch:
E             expected: "a b c\nstatus=0\n'a' 'b\\nc'\nstatus=0\n\nstatus=0"
E             actual:   "a b c\nstatus=0\n'a b\\nc'\nstatus=0\n\nstatus=0"
E           
E           Expected stdout: "a b c\nstatus=0\n'a' 'b\\nc'\nstatus=0\n\nstatus=0"
E           Actual stdout:   "a b c\nstatus=0\n'a b\\nc'\nstatus=0\n\nstatus=0\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c 'echo ${@@P}' dummy a b c
E           echo status=$?
E           $SH -c 'echo ${@@Q}' dummy a 'b\nc'
E           echo status=$?
E           $SH -c 'echo ${@@a}' dummy a b c
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::assoc array and @P @Q @a[L304]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d3470>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='assoc array and @P @Q @a', script='# note: "y z" causes a bug!\n$SH -c \'declare -A A=(["x"]="y"); echo...out', value="- y\nstatus=0\n- 'y'\nstatus=0\nA - A\nstatus=0", shells=None, variant=None)], line_number=304, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assoc array and @P @Q @a (line 304)
E           
E           stdout mismatch:
E             expected: "- y\nstatus=0\n- 'y'\nstatus=0\nA - A\nstatus=0"
E             actual:   "- y\nstatus=0\n'' - 'y'\nstatus=0\nA -\nstatus=0"
E           
E           Expected stdout: "- y\nstatus=0\n- 'y'\nstatus=0\nA - A\nstatus=0"
E           Actual stdout:   "- y\nstatus=0\n'' - 'y'\nstatus=0\nA -\nstatus=0\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # note: "y z" causes a bug!
E           $SH -c 'declare -A A=(["x"]="y"); echo ${A@P} - ${A[@]@P}'
E           echo status=$?
E           
E           # note: "y z" causes a bug!
E           $SH -c 'declare -A A=(["x"]="y"); echo ${A@Q} - ${A[@]@Q}' | sed 's/^- y$/- '\''y'\''/'
E           echo status=$?
E           
E           $SH -c 'declare -A A=(["x"]=y); echo ${A@a} - ${A[@]@a}'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::${#var@X} is a parse error[L343]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d35f0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='${#var@X} is a parse error', script='# note: "y z" causes a bug!\n$SH -c \'declare -A A=(["x"]="y"); ec...assertions=[Assertion(type='stdout', value='fail\nfail\nfail', shells=None, variant=None)], line_number=343, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${#var@X} is a parse error (line 343)
E           
E           stdout mismatch:
E             expected: 'fail\nfail\nfail'
E             actual:   '0\n0\n0'
E           
E           Expected stdout: 'fail\nfail\nfail'
E           Actual stdout:   '0\n0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # note: "y z" causes a bug!
E           $SH -c 'declare -A A=(["x"]="y"); echo ${#A[@]@P}'
E           if test $? -ne 0; then echo fail; fi
E           
E           # note: "y z" causes a bug!
E           $SH -c 'declare -A A=(["x"]="y"); echo ${#A[@]@Q}'
E           if test $? -ne 0; then echo fail; fi
E           
E           $SH -c 'declare -A A=(["x"]=y); echo ${#A[@]@a}'
E           if test $? -ne 0; then echo fail; fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::undef vs. empty string in var ops[L384]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d3770>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='undef vs. empty string in var ops', script="empty=''\nx=x\n\necho ${x@Q} ${empty@Q} ${undef@Q} ${x@Q}\n...x' '' 'x'\n'x' '' 'x'\n'x' '' 'x'\nx='x' empty='' x='x'\nr r", shells=None, variant=None)], line_number=384, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: undef vs. empty string in var ops (line 384)
E           
E           stdout mismatch:
E             expected: "'x' '' 'x'\n'x' '' 'x'\n'x' '' 'x'\nx='x' empty='' x='x'\nr r"
E             actual:   "'x' '' '' 'x'\nx x\n\nx='x' empty='' undef='' x='x'\nr r"
E           
E           Expected stdout: "'x' '' 'x'\n'x' '' 'x'\n'x' '' 'x'\nx='x' empty='' x='x'\nr r"
E           Actual stdout:   "'x' '' '' 'x'\nx x\n\nx='x' empty='' undef='' x='x'\nr r\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           empty=''
E           x=x
E           
E           echo ${x@Q} ${empty@Q} ${undef@Q} ${x@Q}
E           
E           echo ${x@K} ${empty@K} ${undef@K} ${x@K}
E           
E           echo ${x@k} ${empty@k} ${undef@k} ${x@k}
E           
E           echo ${x@A} ${empty@A} ${undef@A} ${x@A}
E           
E           declare -r x
E           echo ${x@a} ${empty@a} ${undef@a} ${x@a}
E           
E           # x x
E           #echo ${x@E} ${empty@E} ${undef@E} ${x@E}
E           # x x
E           #echo ${x@P} ${empty@P} ${undef@P} ${x@P}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::-o nounset with var ops[L413]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d3830>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='-o nounset with var ops', script='set -u\n(echo ${undef@Q}); echo "stat: $?"\n(echo ${undef@P}); echo "...s=[Assertion(type='stdout', value='stat: 1\nstat: 1\nstat: 1', shells=None, variant=None)], line_number=413, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: -o nounset with var ops (line 413)
E           
E           stdout mismatch:
E             expected: 'stat: 1\nstat: 1\nstat: 1'
E             actual:   ''
E           
E           Expected stdout: 'stat: 1\nstat: 1\nstat: 1'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: undef: unbound variable\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -u
E           (echo ${undef@Q}); echo "stat: $?"
E           (echo ${undef@P}); echo "stat: $?"
E           (echo ${undef@a}); echo "stat: $?"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::${a[0]@a} and ${a@a}[L427]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d38f0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='${a[0]@a} and ${a@a}', script='a=(1 2 3)\necho "attr = \'${a[0]@a}\'"\necho "attr = \'${a@a}\'"', assertions=[Assertion(type='stdout', value="attr = 'a'\nattr = 'a'", shells=None, variant=None)], line_number=427, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${a[0]@a} and ${a@a} (line 427)
E           
E           stdout mismatch:
E             expected: "attr = 'a'\nattr = 'a'"
E             actual:   "attr = ''\nattr = 'a'"
E           
E           Expected stdout: "attr = 'a'\nattr = 'a'"
E           Actual stdout:   "attr = ''\nattr = 'a'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           echo "attr = '${a[0]@a}'"
E           echo "attr = '${a@a}'"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::${!r@a} with r='a[0]' (attribute for indirect expansion of an array element)[L439]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d39b0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name="${!r@a} with r='a[0]' (attribute for indirect expansion of an array element)", script="a=(1 2 3)\nr='a'...@a}", assertions=[Assertion(type='stdout', value='a\na\nA\nA', shells=None, variant=None)], line_number=439, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!r@a} with r='a[0]' (attribute for indirect expansion of an array element) (line 439)
E           
E           stdout mismatch:
E             expected: 'a\na\nA\nA'
E             actual:   ''
E           
E           Expected stdout: 'a\na\nA\nA'
E           Actual stdout:   '\n\n\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           r='a'
E           echo ${!r@a}
E           r='a[0]'
E           echo ${!r@a}
E           
E           declare -A d=([0]=foo [1]=bar)
E           r='d'
E           echo ${!r@a}
E           r='d[0]'
E           echo ${!r@a}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::Array expansion with nullary var op @Q[L460]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d3a70>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='Array expansion with nullary var op @Q', script='declare -a a=({1..9})\ndeclare -A A=([\'a\']=hello [\'...["\'hello\' \'world\' \'osh\' \'ysh\'"]\n[]\n[\'\']', shells=['just-bash'], variant='OK')], line_number=460, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Array expansion with nullary var op @Q (line 460)
E           
E           stdout mismatch:
E             expected: '["\'1\'", "\'2\'", "\'3\'", "\'4\'", "\'5\'", "\'6\'", "\'7\'", "\'8\'", "\'9\'"]\n["\'1\' \'2\' \'3\' \'4\' \'5\' \'6\' \'7\' \'8\' \'9\'"]\n["\'ysh\'", "\'osh\'", "\'world\'", "\'hello\'"]\n["\'ysh\' \'osh\' \'world\' \'hello\'"]\n[]\n[\'\']'
E             actual:   '["\'9\'"]\n["\'9\'"]\n["\'hello world osh ysh\'"]\n["\'hello world osh ysh\'"]\n["\'\'"]\n["\'\'"]'
E           
E           Expected stdout: '["\'1\'", "\'2\'", "\'3\'", "\'4\'", "\'5\'", "\'6\'", "\'7\'", "\'8\'", "\'9\'"]\n["\'1\' \'2\' \'3\' \'4\' \'5\' \'6\' \'7\' \'8\' \'9\'"]\n["\'ysh\'", "\'osh\'", "\'world\'", "\'hello\'"]\n["\'ysh\' \'osh\' \'world\' \'hello\'"]\n[]\n[\'\']'
E           Actual stdout:   '["\'9\'"]\n["\'9\'"]\n["\'hello world osh ysh\'"]\n["\'hello world osh ysh\'"]\n["\'\'"]\n["\'\'"]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a a=({1..9})
E           declare -A A=(['a']=hello ['b']=world ['c']=osh ['d']=ysh)
E           
E           argv.py "${a[@]@Q}"
E           argv.py "${a[*]@Q}"
E           argv.py "${A[@]@Q}"
E           argv.py "${A[*]@Q}"
E           argv.py "${u[@]@Q}"
E           argv.py "${u[*]@Q}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::Array expansion with nullary var op @P[L498]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d3b30>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='Array expansion with nullary var op @P', script='declare -a a=({1..9})\ndeclare -A A=([\'a\']=hello [\'...h', 'world', 'hello']\n['ysh osh world hello']\n[]\n['']", shells=['bash'], variant='OK')], line_number=498, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Array expansion with nullary var op @P (line 498)
E           
E           stdout mismatch:
E             expected: "['1', '2', '3', '4', '5', '6', '7', '8', '9']\n['1 2 3 4 5 6 7 8 9']\n['ysh', 'osh', 'world', 'hello']\n['ysh osh world hello']\n[]\n['']"
E             actual:   "['9']\n['9']\n['hello world osh ysh']\n['hello world osh ysh']\n['']\n['']"
E           
E           Expected stdout: "['1', '2', '3', '4', '5', '6', '7', '8', '9']\n['1 2 3 4 5 6 7 8 9']\n['ysh', 'osh', 'world', 'hello']\n['ysh osh world hello']\n[]\n['']"
E           Actual stdout:   "['9']\n['9']\n['hello world osh ysh']\n['hello world osh ysh']\n['']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a a=({1..9})
E           declare -A A=(['a']=hello ['b']=world ['c']=osh ['d']=ysh)
E           
E           argv.py "${a[@]@P}"
E           argv.py "${a[*]@P}"
E           argv.py "${A[@]@P}"
E           argv.py "${A[*]@P}"
E           argv.py "${u[@]@P}"
E           argv.py "${u[*]@P}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-bash.test.sh::Array expansion with nullary var op @a[L528]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093d3bf0>
test_file = 'var-op-bash.test.sh'
test_case = TestCase(name='Array expansion with nullary var op @a', script='declare -a a=({1..9})\ndeclare -A A=([\'a\']=hello [\'...a a a a a a a']\n['A', 'A', 'A', 'A']\n['A A A A']\n[]\n['']", shells=None, variant=None)], line_number=528, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Array expansion with nullary var op @a (line 528)
E           
E           stdout mismatch:
E             expected: "['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']\n['a a a a a a a a a']\n['A', 'A', 'A', 'A']\n['A A A A']\n[]\n['']"
E             actual:   "['']\n['']\n['']\n['']\n['']\n['']"
E           
E           Expected stdout: "['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']\n['a a a a a a a a a']\n['A', 'A', 'A', 'A']\n['A A A A']\n[]\n['']"
E           Actual stdout:   "['']\n['']\n['']\n['']\n['']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a a=({1..9})
E           declare -A A=(['a']=hello ['b']=world ['c']=osh ['d']=ysh)
E           
E           argv.py "${a[@]@a}"
E           argv.py "${a[*]@a}"
E           argv.py "${A[@]@a}"
E           argv.py "${A[*]@a}"
E           argv.py "${u[@]@a}"
E           argv.py "${u[*]@a}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-len.test.sh::Length operator can't be followed by test operator[L55]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f00b0>
test_file = 'var-op-len.test.sh'
test_case = TestCase(name="Length operator can't be followed by test operator", script="echo ${#x-default}\n\nx=''\necho ${#x-defa... variant='BUG'), Assertion(type='stdout', value='0\n0\n3', shells=['dash'], variant='BUG')], line_number=55, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Length operator can't be followed by test operator (line 55)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '0\n0\n0'
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '0\n0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           echo ${#x-default}
E           
E           x=''
E           echo ${#x-default}
E           
E           x='foo'
E           echo ${#x-default}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-len.test.sh::${#s} respects LC_ALL - length in bytes or code points[L80]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f0170>
test_file = 'var-op-len.test.sh'
test_case = TestCase(name='${#s} respects LC_ALL - length in bytes or code points', script='case $SH in dash) exit ;; esac\n\n# Th...ion(type='stdout', value='len=2\nlen=2\n\nlen=3\nlen=3\n', shells=['mksh'], variant='BUG')], line_number=80, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${#s} respects LC_ALL - length in bytes or code points (line 80)
E           
E           stdout mismatch:
E             expected: 'len=1\nlen=2\n\nlen=1\nlen=4'
E             actual:   'len=1\nlen=1\n\nlen=1\nlen=1'
E           
E           Expected stdout: 'len=1\nlen=2\n\nlen=1\nlen=4\n'
E           Actual stdout:   'len=1\nlen=1\n\nlen=1\nlen=1\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           # This test case is sorta "infected" because spec-common.sh sets LC_ALL=C.UTF-8
E           #
E           # For some reason mksh behaves differently
E           #
E           # See demo/04-unicode.sh
E           
E           #echo $LC_ALL
E           unset LC_ALL 
E           
E           # note: this may depend on the CI machine config
E           LANG=en_US.UTF-8
E           
E           #LC_ALL=en_US.UTF-8
E           
E           for s in $'\u03bc' $'\U00010000'; do
E             LC_ALL=
E             echo "len=${#s}"
E           
E             LC_ALL=C
E             echo "len=${#s}"
E           
E             echo
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Pattern replacement[L7]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f0230>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Pattern replacement', script='v=abcde\necho ${v/c*/XX}', assertions=[Assertion(type='stdout', value='abXX', shells=None, variant=None)], line_number=7, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Pattern replacement (line 7)
E           
E           stdout mismatch:
E             expected: 'abXX'
E             actual:   'abXXde'
E           
E           Expected stdout: 'abXX'
E           Actual stdout:   'abXXde\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           v=abcde
E           echo ${v/c*/XX}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Replace is longest match[L59]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f06b0>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Replace is longest match', script="# If it were shortest, then you would just replace the first <html>\...]}", assertions=[Assertion(type='stdout', value='begin [] end', shells=None, variant=None)], line_number=59, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Replace is longest match (line 59)
E           
E           stdout mismatch:
E             expected: 'begin [] end'
E             actual:   'begin []</html> end'
E           
E           Expected stdout: 'begin [] end'
E           Actual stdout:   'begin []</html> end\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # If it were shortest, then you would just replace the first <html>
E           s='begin <html></html> end'
E           echo ${s/<*>/[]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Replace hard glob[L71]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f0830>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Replace hard glob', script="s='aa*bb+cc'\necho ${s//\\**+/__}  # Literal *, then any sequence of charac...iteral +", assertions=[Assertion(type='stdout', value='aa__cc', shells=None, variant=None)], line_number=71, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Replace hard glob (line 71)
E           
E           stdout mismatch:
E             expected: 'aa__cc'
E             actual:   '__cc'
E           
E           Expected stdout: 'aa__cc'
E           Actual stdout:   '__cc\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s='aa*bb+cc'
E           echo ${s//\**+/__}  # Literal *, then any sequence of characters, then literal +
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Confusing unquoted slash matches bash (and ash)[L104]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f0a70>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Confusing unquoted slash matches bash (and ash)', script="x='/_/'\necho ${x////c}\n\necho ${x//'/'/c}",...iant='BUG'), Assertion(type='stdout', value='c_c\n/_/ /c', shells=['ash'], variant='BUG')], line_number=104, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Confusing unquoted slash matches bash (and ash) (line 104)
E           
E           stdout mismatch:
E             expected: 'c_c\nc_c'
E             actual:   '/c//c_/c//c\n/c//c_/c//c'
E           
E           Expected stdout: 'c_c\nc_c'
E           Actual stdout:   '/c//c_/c//c\n/c//c_/c//c\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='/_/'
E           echo ${x////c}
E           
E           echo ${x//'/'/c}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Synthesized ${x///} bug (similar to above)[L127]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f0b30>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Synthesized ${x///} bug (similar to above)', script="# found via test/parse-errors.sh\n\nx='slash / bra...biguous: slash brace } hi\nquoted:    slash / brace } hi', shells=['ash'], variant='BUG')], line_number=127, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Synthesized ${x///} bug (similar to above) (line 127)
E           
E           stdout mismatch:
E             expected: 'ambiguous: slash brace } hi\nquoted:    slash brace } hi'
E             actual:   'ambiguous: slash / brace } hi\nquoted:    slash / brace } hi'
E           
E           Expected stdout: 'ambiguous: slash brace } hi\nquoted:    slash brace } hi'
E           Actual stdout:   'ambiguous: slash / brace } hi\nquoted:    slash / brace } hi\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # found via test/parse-errors.sh
E           
E           x='slash / brace } hi'
E           echo 'ambiguous:' ${x///}
E           
E           echo 'quoted:   ' ${x//'/'}
E           
E           # Wow we have all combination here -- TERRIBLE
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Replace backslash[L170]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f0d70>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Replace backslash', script='v=\'[\\f]\'\nx=\'\\f\'\necho ${v/"$x"/_}\n\n# mksh and zsh differ on this c...rtion(type='stdout', value='[_]\n[_]\n[\\_]\n[_]', shells=['mksh', 'zsh'], variant='BUG')], line_number=170, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Replace backslash (line 170)
E           
E           stdout mismatch:
E             expected: '[_]\n[\\_]\n[\\_]\n[_]'
E             actual:   '[_]\n[_]\n[\\_]\n[_]'
E           
E           Expected stdout: '[_]\n[\\_]\n[\\_]\n[_]'
E           Actual stdout:   '[_]\n[_]\n[\\_]\n[_]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           v='[\f]'
E           x='\f'
E           echo ${v/"$x"/_}
E           
E           # mksh and zsh differ on this case, but this is consistent with the fact that
E           # \f as a glob means 'f', not '\f'.  TODO: Warn that it's a bad glob?
E           # The canonical form is 'f'.
E           echo ${v/$x/_}
E           
E           echo ${v/\f/_}
E           echo ${v/\\f/_}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Substitute glob characters in pattern, quoted and unquoted[L205]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f0ef0>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Substitute glob characters in pattern, quoted and unquoted', script='# INFINITE LOOP in ash!\ncase $SH ... variant=None), Assertion(type='stdout', value='a-b\na-b', shells=['zsh'], variant='BUG')], line_number=205, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Substitute glob characters in pattern, quoted and unquoted (line 205)
E           
E           stdout mismatch:
E             expected: 'a-b\n-'
E             actual:   '-------\n-------'
E           
E           Expected stdout: 'a-b\n-'
E           Actual stdout:   '-------\n-------\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # INFINITE LOOP in ash!
E           case $SH in ash) exit ;; esac
E           
E           g='*'
E           v='a*b'
E           echo ${v//"$g"/-}
E           echo ${v//$g/-}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::When LC_ALL=C, pattern ? doesn't match multibyte character[L245]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f1070>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name="When LC_ALL=C, pattern ? doesn't match multibyte character", script="export LC_ALL='C'\n\ns='_μ_ and _μ...nd _μ_\n_μ_ and _μ_\n\nfoo and foo\nfoo and _y_\n_x_ and foo', shells=None, variant=None)], line_number=245, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: When LC_ALL=C, pattern ? doesn't match multibyte character (line 245)
E           
E           stdout mismatch:
E             expected: '_μ_ and _μ_\n_μ_ and _μ_\n_μ_ and _μ_\n\nfoo and foo\nfoo and _y_\n_x_ and foo'
E             actual:   'foo and foo\nfoo and _μ_\n_μ_ and foo\n\nfoo and foo\nfoo and _y_\n_x_ and foo'
E           
E           Expected stdout: '_μ_ and _μ_\n_μ_ and _μ_\n_μ_ and _μ_\n\nfoo and foo\nfoo and _y_\n_x_ and foo'
E           Actual stdout:   'foo and foo\nfoo and _μ_\n_μ_ and foo\n\nfoo and foo\nfoo and _y_\n_x_ and foo\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           export LC_ALL='C'
E           
E           s='_μ_ and _μ_'
E           
E           # ? should match one char
E           
E           echo ${s//_?_/foo}  # all
E           echo ${s/#_?_/foo}  # left
E           echo ${s/%_?_/foo}  # right
E           echo
E           
E           a='_x_ and _y_'
E           
E           echo ${a//_?_/foo}  # all
E           echo ${a/#_?_/foo}  # left
E           echo ${a/%_?_/foo}  # right
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::\\(\\) in pattern (regression)[L302]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f11f0>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='\\(\\) in pattern (regression)', script="# Not extended globs\nx='foo()' \necho 1 ${x//*\\(\\)/z}\necho...tions=[Assertion(type='stdout', value='1 z\n2 z\n3 fooz\n4 z', shells=None, variant=None)], line_number=302, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \(\) in pattern (regression) (line 302)
E           
E           stdout mismatch:
E             expected: '1 z\n2 z\n3 fooz\n4 z'
E             actual:   '1 zfzozoz(z)z\n2 zfzozoz(z)z\n3 fooz\n4 zfzozoz(z)z'
E           
E           Expected stdout: '1 z\n2 z\n3 fooz\n4 z'
E           Actual stdout:   '1 zfzozoz(z)z\n2 zfzozoz(z)z\n3 fooz\n4 zfzozoz(z)z\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Not extended globs
E           x='foo()' 
E           echo 1 ${x//*\(\)/z}
E           echo 2 ${x//*\(\)/z}
E           echo 3 ${x//\(\)/z}
E           echo 4 ${x//*\(\)/z}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::patsub with single quotes and hyphen in character class (regression)[L318]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f12b0>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='patsub with single quotes and hyphen in character class (regression)', script="# from Crestwave's bf.ba...riant=None), Assertion(type='stdout', value='helloworld', shells=['mksh'], variant='BUG')], line_number=318, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: patsub with single quotes and hyphen in character class (regression) (line 318)
E           
E           stdout mismatch:
E             expected: '++--.,<>[]'
E             actual:   '^++--hello.,world<>[]'
E           
E           Expected stdout: '++--.,<>[]'
E           Actual stdout:   '^++--hello.,world<>[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # from Crestwave's bf.bash
E           
E           program='^++--hello.,world<>[]'
E           program=${program//[^'><+-.,[]']} 
E           echo $program
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::patsub with [^]][L332]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f1370>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='patsub with [^]]', script="# This is a PARSING divergence.  In OSH we match [], rather than using POSIX...$pat/z}", assertions=[Assertion(type='stdout', value='ab^cd^', shells=None, variant=None)], line_number=332, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: patsub with [^]] (line 332)
E           
E           stdout mismatch:
E             expected: 'ab^cd^'
E             actual:   'zzzzzz'
E           
E           Expected stdout: 'ab^cd^'
E           Actual stdout:   'zzzzzz\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is a PARSING divergence.  In OSH we match [], rather than using POSIX
E           # rules!
E           
E           pat='[^]]'
E           s='ab^cd^'
E           echo ${s//$pat/z}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::[a-z] Invalid range end is syntax error[L344]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f1430>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='[a-z] Invalid range end is syntax error', script="x=fooz\npat='[z-a]'  # Invalid range.  Other shells d...), Assertion(type='status', value=0, shells=['bash', 'mksh', 'zsh', 'ash'], variant='OK')], line_number=344, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: [a-z] Invalid range end is syntax error (line 344)
E           
E           Execution error: bad character range z-a at position 1
E           
E           
E           Script:
E           ---
E           x=fooz
E           pat='[z-a]'  # Invalid range.  Other shells don't catch it!
E           #pat='[a-y]'
E           echo ${x//$pat}
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Pattern is empty $foo$bar -- regression for infinite loop[L359]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f14f0>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Pattern is empty $foo$bar -- regression for infinite loop', script='x=-foo-\n\necho ${x//$foo$bar/bar}\...e), Assertion(type='stdout', value='bar-barfbarobarobar-', shells=['zsh'], variant='BUG')], line_number=359, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Pattern is empty $foo$bar -- regression for infinite loop (line 359)
E           
E           stdout mismatch:
E             expected: '-foo-'
E             actual:   'bar-barfbarobarobar-bar'
E           
E           Expected stdout: '-foo-'
E           Actual stdout:   'bar-barfbarobarobar-bar\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=-foo-
E           
E           echo ${x//$foo$bar/bar}
E           
E           
E           # feels like memory unsafety in ZSH
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-patsub.test.sh::Chromium from http://www.oilshell.org/blog/2016/11/07.html[L374]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f15b0>
test_file = 'var-op-patsub.test.sh'
test_case = TestCase(name='Chromium from http://www.oilshell.org/blog/2016/11/07.html', script="case $SH in zsh) exit ;; esac\n\nH...(type='stdout', value='/foo/bar/baz\n\\/foo\\/bar\\/baz', shells=['mksh'], variant='BUG')], line_number=374, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Chromium from http://www.oilshell.org/blog/2016/11/07.html (line 374)
E           
E           stdout mismatch:
E             expected: '\\/foo\\/bar\\/baz\n\\/foo\\/bar\\/baz'
E             actual:   '/\\///\\/f/\\/o/\\/o/\\///\\/b/\\/a/\\/r/\\///\\/b/\\/a/\\/z/\\/\n/\\///\\/f/\\/o/\\/o/\\///\\/b/\\/a/\\/r/\\///\\/b/\\/a/\\/z/\\/'
E           
E           Expected stdout: '\\/foo\\/bar\\/baz\n\\/foo\\/bar\\/baz'
E           Actual stdout:   '/\\///\\/f/\\/o/\\/o/\\///\\/b/\\/a/\\/r/\\///\\/b/\\/a/\\/z/\\/\n/\\///\\/f/\\/o/\\/o/\\///\\/b/\\/a/\\/r/\\///\\/b/\\/a/\\/z/\\/\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) exit ;; esac
E           
E           HOST_PATH=/foo/bar/baz
E           echo ${HOST_PATH////\\/}
E           
E           # The way bash parses it
E           echo ${HOST_PATH//'/'/\\/}
E           
E           
E           # zsh has crazy bugs
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::Cannot take length of substring slice[L14]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f17f0>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='Cannot take length of substring slice', script='# These are runtime errors, but we could make them pars...ls=['zsh'], variant='OK'), Assertion(type='status', value=0, shells=['zsh'], variant='OK')], line_number=14, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Cannot take length of substring slice (line 14)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # These are runtime errors, but we could make them parse time errors.
E           v=abcde
E           echo ${#v:1:3}
E           # zsh actually implements this!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::String slice with math[L76]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f1d30>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='String slice with math', script='# I think this is the $(()) language inside?\ni=1\nfoo=abcdefg\necho $...-2 : i + 2}', assertions=[Assertion(type='stdout', value='def', shells=None, variant=None)], line_number=76, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: String slice with math (line 76)
E           
E           stdout mismatch:
E             expected: 'def'
E             actual:   ''
E           
E           Expected stdout: 'def'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # I think this is the $(()) language inside?
E           i=1
E           foo=abcdefg
E           echo ${foo: i+4-2 : i + 2}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::Slice with an index that's an array -- silent a[0] decay[L139]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f20f0>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name="Slice with an index that's an array -- silent a[0] decay", script='i=(3 4 5)\nmystr=abcdefg\necho assig..., variant='OK'), Assertion(type='stdout', value='assigned', shells=['zsh'], variant='OK')], line_number=139, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Slice with an index that's an array -- silent a[0] decay (line 139)
E           
E           stdout mismatch:
E             expected: 'assigned\nde'
E             actual:   'assigned'
E           
E           Expected stdout: 'assigned\nde'
E           Actual stdout:   'assigned\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           i=(3 4 5)
E           mystr=abcdefg
E           echo assigned
E           echo ${mystr:$i:2}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::Slice with an assoc array[L155]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f21b0>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='Slice with an assoc array', script="declare -A A=(['5']=3 ['6']=4)\nmystr=abcdefg\necho assigned\necho ..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=155, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Slice with an assoc array (line 155)
E           
E           stdout mismatch:
E             expected: 'assigned\nab'
E             actual:   'assigned'
E           
E           Expected stdout: 'assigned\nab'
E           Actual stdout:   'assigned\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A=(['5']=3 ['6']=4)
E           mystr=abcdefg
E           echo assigned
E           echo ${mystr:$A:2}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::Simple ${@:offset}[L170]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f2270>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='Simple ${@:offset}', script='set -- 4 5 6\n\nresult=$(argv.py ${@:0})\necho ${result//"$0"/\'SHELL\'}\n...ksh'], variant='N-I'), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=170, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Simple ${@:offset} (line 170)
E           
E           stdout mismatch:
E             expected: "['SHELL', '4', '5', '6']\n['4', '5', '6']\n['5', '6']"
E             actual:   "['4', '5', '6']\n['5', '6']\n['5', '6']"
E           
E           Expected stdout: "['SHELL', '4', '5', '6']\n['4', '5', '6']\n['5', '6']"
E           Actual stdout:   "['4', '5', '6']\n['5', '6']\n['5', '6']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- 4 5 6
E           
E           result=$(argv.py ${@:0})
E           echo ${result//"$0"/'SHELL'}
E           
E           argv.py ${@:1}
E           argv.py ${@:2}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::${@:offset} and ${*:offset}[L191]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f2330>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='${@:offset} and ${*:offset}', script='case $SH in zsh) return ;; esac  # zsh is very different\n\nargv....], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='BUG')], line_number=191, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${@:offset} and ${*:offset} (line 191)
E           
E           stdout mismatch:
E             expected: "['-SHELL', 'a', '1', 'b', '2', 'c', '3-']\n['-a', '1', 'b', '2', 'c', '3-']\n['-c', '3-']\n['--']\n['--']\n['-SHELL', 'a', '1', 'b', '2', 'c', '3-']\n['-a', '1', 'b', '2', 'c', '3-']\n['-c', '3-']\n['--']\n['--']\n['-SHELL a 1 b 2 c 3-']\n['-a 1 b 2 c 3-']\n['-c 3-']\n['--']\n['--']\n['-SHELL', 'a 1', 'b 2', 'c 3-']\n['-a 1', 'b 2', 'c 3-']\n['-c 3-']\n['--']\n['--']"
E             actual:   "['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']"
E           
E           Expected stdout: "['-SHELL', 'a', '1', 'b', '2', 'c', '3-']\n['-a', '1', 'b', '2', 'c', '3-']\n['-c', '3-']\n['--']\n['--']\n['-SHELL', 'a', '1', 'b', '2', 'c', '3-']\n['-a', '1', 'b', '2', 'c', '3-']\n['-c', '3-']\n['--']\n['--']\n['-SHELL a 1 b 2 c 3-']\n['-a 1 b 2 c 3-']\n['-c 3-']\n['--']\n['--']\n['-SHELL', 'a 1', 'b 2', 'c 3-']\n['-a 1', 'b 2', 'c 3-']\n['-c 3-']\n['--']\n['--']"
E           Actual stdout:   "['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) return ;; esac  # zsh is very different
E           
E           argv.shell-name-checked () {
E             argv.py "${@//$0/SHELL}"
E           }
E           fun() {
E             argv.shell-name-checked -${*:0}- # include $0
E             argv.shell-name-checked -${*:1}- # from $1
E             argv.shell-name-checked -${*:3}- # last parameter $3
E             argv.shell-name-checked -${*:4}- # empty
E             argv.shell-name-checked -${*:5}- # out of boundary
E             argv.shell-name-checked -${@:0}-
E             argv.shell-name-checked -${@:1}-
E             argv.shell-name-checked -${@:3}-
E             argv.shell-name-checked -${@:4}-
E             argv.shell-name-checked -${@:5}-
E             argv.shell-name-checked "-${*:0}-"
E             argv.shell-name-checked "-${*:1}-"
E             argv.shell-name-checked "-${*:3}-"
E             argv.shell-name-checked "-${*:4}-"
E             argv.shell-name-checked "-${*:5}-"
E             argv.shell-name-checked "-${@:0}-"
E             argv.shell-name-checked "-${@:1}-"
E             argv.shell-name-checked "-${@:3}-"
E             argv.shell-name-checked "-${@:4}-"
E             argv.shell-name-checked "-${@:5}-"
E           }
E           fun "a 1" "b 2" "c 3"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::${@:offset:length} and ${*:offset:length}[L246]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f23f0>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='${@:offset:length} and ${*:offset:length}', script='case $SH in zsh) return ;; esac  # zsh is very diff...], variant='N-I'), Assertion(type='stdout-json', value='', shells=['zsh'], variant='BUG')], line_number=246, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${@:offset:length} and ${*:offset:length} (line 246)
E           
E           stdout mismatch:
E             expected: "['-SHELL', 'a', '1-']\n['-a', '1', 'b', '2-']\n['-c', '3-']\n['--']\n['--']\n['-SHELL', 'a', '1-']\n['-a', '1', 'b', '2-']\n['-c', '3-']\n['--']\n['--']\n['-SHELL a 1-']\n['-a 1 b 2-']\n['-c 3-']\n['--']\n['--']\n['-SHELL', 'a 1-']\n['-a 1', 'b 2-']\n['-c 3-']\n['--']\n['--']"
E             actual:   "['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']"
E           
E           Expected stdout: "['-SHELL', 'a', '1-']\n['-a', '1', 'b', '2-']\n['-c', '3-']\n['--']\n['--']\n['-SHELL', 'a', '1-']\n['-a', '1', 'b', '2-']\n['-c', '3-']\n['--']\n['--']\n['-SHELL a 1-']\n['-a 1 b 2-']\n['-c 3-']\n['--']\n['--']\n['-SHELL', 'a 1-']\n['-a 1', 'b 2-']\n['-c 3-']\n['--']\n['--']"
E           Actual stdout:   "['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n['--']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) return ;; esac  # zsh is very different
E           
E           argv.shell-name-checked () {
E             argv.py "${@//$0/SHELL}"
E           }
E           fun() {
E             argv.shell-name-checked -${*:0:2}- # include $0
E             argv.shell-name-checked -${*:1:2}- # from $1
E             argv.shell-name-checked -${*:3:2}- # last parameter $3
E             argv.shell-name-checked -${*:4:2}- # empty
E             argv.shell-name-checked -${*:5:2}- # out of boundary
E             argv.shell-name-checked -${@:0:2}-
E             argv.shell-name-checked -${@:1:2}-
E             argv.shell-name-checked -${@:3:2}-
E             argv.shell-name-checked -${@:4:2}-
E             argv.shell-name-checked -${@:5:2}-
E             argv.shell-name-checked "-${*:0:2}-"
E             argv.shell-name-checked "-${*:1:2}-"
E             argv.shell-name-checked "-${*:3:2}-"
E             argv.shell-name-checked "-${*:4:2}-"
E             argv.shell-name-checked "-${*:5:2}-"
E             argv.shell-name-checked "-${@:0:2}-"
E             argv.shell-name-checked "-${@:1:2}-"
E             argv.shell-name-checked "-${@:3:2}-"
E             argv.shell-name-checked "-${@:4:2}-"
E             argv.shell-name-checked "-${@:5:2}-"
E           }
E           fun "a 1" "b 2" "c 3"
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::${@:0:1}[L301]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f24b0>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='${@:0:1}', script='set a b c\nresult=$(echo ${@:0:1})\necho ${result//"$0"/\'SHELL\'}', assertions=[Ass...s=None, variant=None), Assertion(type='stdout', value='', shells=['mksh'], variant='N-I')], line_number=301, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${@:0:1} (line 301)
E           
E           stdout mismatch:
E             expected: 'SHELL'
E             actual:   'a'
E           
E           Expected stdout: 'SHELL'
E           Actual stdout:   'a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set a b c
E           result=$(echo ${@:0:1})
E           echo ${result//"$0"/'SHELL'}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::Permutations of implicit begin and length[L312]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f2570>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='Permutations of implicit begin and length', script='array=(1 2 3)\n\nargv.py ${array[@]}\n\n# *** impli...'BUG'), Assertion(type='stdout', value="['1', '2', '3']", shells=['mksh'], variant='BUG')], line_number=312, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Permutations of implicit begin and length (line 312)
E           
E           stdout mismatch:
E             expected: "['1', '2', '3']\n['1', '2', '3']\n[]\n\n['1', '2', '3']\n['1', '2', '3']\n[]\n\n['123']\n['123']\n['']"
E             actual:   "['1', '2', '3']\n['1', '2', '3']\n[]\n\n['1', '2', '3']\n['2', '3']\n[]\n\n['123']\n['123']\n['']"
E           
E           Expected stdout: "['1', '2', '3']\n['1', '2', '3']\n[]\n\n['1', '2', '3']\n['1', '2', '3']\n[]\n\n['123']\n['123']\n['']"
E           Actual stdout:   "['1', '2', '3']\n['1', '2', '3']\n[]\n\n['1', '2', '3']\n['2', '3']\n[]\n\n['123']\n['123']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           array=(1 2 3)
E           
E           argv.py ${array[@]}
E           
E           # *** implicit length of N **
E           argv.py ${array[@]:0}
E           
E           # Why is this one not allowed
E           #argv.py ${array[@]:}
E           
E           # ** implicit length of ZERO **
E           #argv.py ${array[@]::}
E           #argv.py ${array[@]:0:}
E           
E           argv.py ${array[@]:0:0}
E           echo
E           
E           # Same agreed upon permutations
E           set -- 1 2 3
E           argv.py ${@}
E           argv.py ${@:1}
E           argv.py ${@:1:0}
E           echo
E           
E           s='123'
E           argv.py "${s}"
E           argv.py "${s:0}"
E           argv.py "${s:0:0}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::${array[@]:} vs ${array[@]: }  - bash and zsh inconsistent[L361]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f2630>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='${array[@]:} vs ${array[@]: }  - bash and zsh inconsistent', script="$SH -c 'array=(1 2 3); argv.py ${a...'OK'), Assertion(type='stdout', value="['space', '123']", shells=['mksh'], variant='BUG')], line_number=361, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${array[@]:} vs ${array[@]: }  - bash and zsh inconsistent (line 361)
E           
E           stdout mismatch:
E             expected: "['space', '1', '2', '3']\n['space', '123']"
E             actual:   "['1', '2', '3']\n['space']\n['123']\n['space']"
E           
E           Expected stdout: "['space', '1', '2', '3']\n['space', '123']"
E           Actual stdout:   "['1', '2', '3']\n['space']\n['123']\n['space']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c 'array=(1 2 3); argv.py ${array[@]:}'
E           $SH -c 'array=(1 2 3); argv.py space ${array[@]: }'
E           
E           $SH -c 's=123; argv.py ${s:}'
E           $SH -c 's=123; argv.py space ${s: }'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-slice.test.sh::${array[@]::} has implicit length of zero - for ble.sh[L386]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f26f0>
test_file = 'var-op-slice.test.sh'
test_case = TestCase(name='${array[@]::} has implicit length of zero - for ble.sh', script='# https://oilshell.zulipchat.com/#narr..., variant='OK'), Assertion(type='stdout', value='', shells=['mksh', 'zsh'], variant='OK')], line_number=386, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${array[@]::} has implicit length of zero - for ble.sh (line 386)
E           
E           stdout mismatch:
E             expected: '[]\n[]\n\n[]\n[]'
E             actual:   "['1', '2', '3']\n['1', '2', '3']\n\n['1', '2', '3']\n['1', '2', '3']"
E           
E           Expected stdout: '[]\n[]\n\n[]\n[]'
E           Actual stdout:   "['1', '2', '3']\n['1', '2', '3']\n\n['1', '2', '3']\n['1', '2', '3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # https://oilshell.zulipchat.com/#narrow/stream/121540-oil-discuss/topic/.24.7Barr.5B.40.5D.3A.3A.7D.20in.20bash.20-.20is.20it.20documented.3F
E           
E           array=(1 2 3)
E           argv.py ${array[@]::}
E           argv.py ${array[@]:0:}
E           
E           echo
E           
E           set -- 1 2 3
E           argv.py ${@::}
E           argv.py ${@:0:}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::Remove const suffix is vectorized on $@ array[L22]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f29f0>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='Remove const suffix is vectorized on $@ array', script='set -- 1a 2a 3a\nargv.py ${@%a}', assertions=[A...'mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=22, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Remove const suffix is vectorized on $@ array (line 22)
E           
E           stdout mismatch:
E             expected: "['1', '2', '3']"
E             actual:   "['1a', '2a', '3']"
E           
E           Expected stdout: "['1', '2', '3']"
E           Actual stdout:   "['1a', '2a', '3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- 1a 2a 3a
E           argv.py ${@%a}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::Strip unicode prefix[L62]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f2f30>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='Strip unicode prefix', script="show_hex() { od -A n -t c -t x1; }\n\n# NOTE: LANG is set to utf-8.\n# ?...'  \\n\n  0a\n\n  \\n\n  0a\n\n  \\n\n  0a\n\n  \\n\n  0a', shells=['zsh'], variant='BUG')], line_number=62, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Strip unicode prefix (line 62)
E           
E           stdout mismatch:
E             expected: '   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a'
E             actual:   '  2d  0a\n\n  2d  0a\n\n  2d  0a\n\n  2d  0a'
E           
E           Expected stdout: '   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a'
E           Actual stdout:   '  2d  0a\n\n  2d  0a\n\n  2d  0a\n\n  2d  0a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_hex() { od -A n -t c -t x1; }
E           
E           # NOTE: LANG is set to utf-8.
E           # ? is a glob that stands for one character
E           
E           v='μ-'
E           echo ${v#?} | show_hex
E           echo
E           echo ${v##?} | show_hex
E           echo
E           
E           v='-μ'
E           echo ${v%?} | show_hex
E           echo
E           echo ${v%%?} | show_hex
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::strip unquoted and quoted [[L158]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f32f0>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='strip unquoted and quoted [', script='# I guess dash and mksh treat unquoted [ as an invalid glob?\nvar...['zsh'], variant='BUG'), Assertion(type='status', value=1, shells=['zsh'], variant='BUG')], line_number=158, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: strip unquoted and quoted [ (line 158)
E           
E           Execution error: unterminated character set at position 0
E           
E           
E           Script:
E           ---
E           # I guess dash and mksh treat unquoted [ as an invalid glob?
E           var='[foo]'
E           echo ${var#[}
E           echo ${var#"["}
E           echo "${var#[}"
E           echo "${var#"["}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::strip unquoted and quoted [][L180]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f33b0>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='strip unquoted and quoted []', script='# LooksLikeGlob(\'[]\') is true\n# I guess dash, mksh, and zsh t...e='stdout', value='[]foo[]\nfoo[]\n[]foo[]\nfoo[]', shells=['mksh', 'zsh'], variant='OK')], line_number=180, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: strip unquoted and quoted [] (line 180)
E           
E           Execution error: unterminated character set at position 0
E           
E           
E           Script:
E           ---
E           # LooksLikeGlob('[]') is true
E           # I guess dash, mksh, and zsh treat unquoted [ as an invalid glob?
E           var='[]foo[]'
E           echo ${var#[]}
E           echo ${var#"[]"}
E           echo "${var#[]}"
E           echo "${var#"[]"}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::strip unquoted and quoted ?[L201]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f3470>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='strip unquoted and quoted ?', script='var=\'[foo]\'\necho ${var#?}\necho ${var#"?"}\necho "${var#?}"\ne...ns=[Assertion(type='stdout', value='foo]\n[foo]\nfoo]\n[foo]', shells=None, variant=None)], line_number=201, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: strip unquoted and quoted ? (line 201)
E           
E           stdout mismatch:
E             expected: 'foo]\n[foo]\nfoo]\n[foo]'
E             actual:   'foo]\nfoo]\nfoo]\nfoo]'
E           
E           Expected stdout: 'foo]\n[foo]\nfoo]\n[foo]'
E           Actual stdout:   'foo]\nfoo]\nfoo]\nfoo]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           var='[foo]'
E           echo ${var#?}
E           echo ${var#"?"}
E           echo "${var#?}"
E           echo "${var#"?"}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::strip unquoted and quoted [a][L214]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f3530>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='strip unquoted and quoted [a]', script='var=\'[a]foo[]\'\necho ${var#[a]}\necho ${var#"[a]"}\necho "${v...rtion(type='stdout', value='[a]foo[]\nfoo[]\n[a]foo[]\nfoo[]', shells=None, variant=None)], line_number=214, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: strip unquoted and quoted [a] (line 214)
E           
E           stdout mismatch:
E             expected: '[a]foo[]\nfoo[]\n[a]foo[]\nfoo[]'
E             actual:   '[a]foo[]\n[a]foo[]\n[a]foo[]\n[a]foo[]'
E           
E           Expected stdout: '[a]foo[]\nfoo[]\n[a]foo[]\nfoo[]'
E           Actual stdout:   '[a]foo[]\n[a]foo[]\n[a]foo[]\n[a]foo[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           var='[a]foo[]'
E           echo ${var#[a]}
E           echo ${var#"[a]"}
E           echo "${var#[a]}"
E           echo "${var#"[a]"}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::Nested % and # operators (bug reported by Crestwave)[L227]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f35f0>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='Nested % and # operators (bug reported by Crestwave)', script='var=$\'\\n\'\nargv.py "${var#?}"\nargv.p...value="['\\\\n']\n['$\\\\n']\n['$']\n['']\n['a']\n['a']", shells=['dash'], variant='N-I')], line_number=227, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Nested % and # operators (bug reported by Crestwave) (line 227)
E           
E           stdout mismatch:
E             expected: "['']\n['\\n']\n['\\n']\n['']\n['a']\n['a']"
E             actual:   "['\n']\n['\n']\n['']\n['']\n['a']\n['a']"
E           
E           Expected stdout: "['']\n['\\n']\n['\\n']\n['']\n['a']\n['a']"
E           Actual stdout:   "['\n']\n['\n']\n['']\n['']\n['a']\n['a']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           var=$'\n'
E           argv.py "${var#?}"
E           argv.py "${var%''}"
E           argv.py "${var%"${var#?}"}"
E           var='a'
E           argv.py "${var#?}"
E           argv.py "${var%''}"
E           argv.py "${var%"${var#?}"}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::strip * (bug regression)[L253]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f36b0>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='strip * (bug regression)', script='x=abc\nargv.py "${x#*}"\nargv.py "${x##*}"\nargv.py "${x%*}"\nargv.p...ertion(type='stdout', value="['abc']\n['']\n['ab']\n['']", shells=['zsh'], variant='BUG')], line_number=253, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: strip * (bug regression) (line 253)
E           
E           stdout mismatch:
E             expected: "['abc']\n['']\n['abc']\n['']"
E             actual:   "['abc']\n['']\n['ab']\n['']"
E           
E           Expected stdout: "['abc']\n['']\n['abc']\n['']"
E           Actual stdout:   "['abc']\n['']\n['ab']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=abc
E           argv.py "${x#*}"
E           argv.py "${x##*}"
E           argv.py "${x%*}"
E           argv.py "${x%%*}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::strip none unicode[L329]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f3a70>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='strip none unicode', script='x=μabcμ\nargv.py "${x#}"\nargv.py "${x##}"\nargv.py "${x%}"\nargv.py "${x%...']\n['\\xce\\xbcabc\\xce\\xbc']\n['\\xce\\xbcabc\\xce\\xbc']", shells=None, variant=None)], line_number=329, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: strip none unicode (line 329)
E           
E           stdout mismatch:
E             expected: "['\\xce\\xbcabc\\xce\\xbc']\n['\\xce\\xbcabc\\xce\\xbc']\n['\\xce\\xbcabc\\xce\\xbc']\n['\\xce\\xbcabc\\xce\\xbc']"
E             actual:   "['μabcμ']\n['μabcμ']\n['μabcμ']\n['μabcμ']"
E           
E           Expected stdout: "['\\xce\\xbcabc\\xce\\xbc']\n['\\xce\\xbcabc\\xce\\xbc']\n['\\xce\\xbcabc\\xce\\xbc']\n['\\xce\\xbcabc\\xce\\xbc']"
E           Actual stdout:   "['μabcμ']\n['μabcμ']\n['μabcμ']\n['μabcμ']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=μabcμ
E           argv.py "${x#}"
E           argv.py "${x##}"
E           argv.py "${x%}"
E           argv.py "${x%%}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::Strip Right Brace (#702)[L342]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f3b30>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='Strip Right Brace (#702)', script='var=\'$foo\'\necho 1 "${var#$foo}"\necho 2 "${var#\\$foo}"\n\nvar=\'...ype='stdout', value="1 $foo\n2 \n10 }}\n11 \n12 }'}\n13 ", shells=['zsh'], variant='BUG')], line_number=342, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Strip Right Brace (#702) (line 342)
E           
E           stdout mismatch:
E             expected: '1 $foo\n2\n10 }}\n11\n12\n13'
E             actual:   "1 $foo\n2\n10 }}\n11 }}\n12 }'}\n13 }}"
E           
E           Expected stdout: '1 $foo\n2 \n10 }}\n11 \n12 \n13 '
E           Actual stdout:   "1 $foo\n2 \n10 }}\n11 }}\n12 }'}\n13 }}\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           var='$foo'
E           echo 1 "${var#$foo}"
E           echo 2 "${var#\$foo}"
E           
E           var='}'
E           echo 10 "${var#}}"
E           echo 11 "${var#\}}"
E           echo 12 "${var#'}'}"
E           echo 13 "${var#"}"}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-strip.test.sh::\\(\\) in pattern (regression)[L369]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1093f3bf0>
test_file = 'var-op-strip.test.sh'
test_case = TestCase(name='\\(\\) in pattern (regression)', script="x='foo()' \necho 1 ${x%*\\(\\)}\necho 2 ${x%%*\\(\\)}\necho 3 ..., assertions=[Assertion(type='stdout', value='1 foo\n2\n3\n4', shells=None, variant=None)], line_number=369, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \(\) in pattern (regression) (line 369)
E           
E           stdout mismatch:
E             expected: '1 foo\n2\n3\n4'
E             actual:   '1 foo()\n2 foo()\n3 foo()\n4 foo()'
E           
E           Expected stdout: '1 foo\n2\n3\n4'
E           Actual stdout:   '1 foo()\n2 foo()\n3 foo()\n4 foo()\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='foo()' 
E           echo 1 ${x%*\(\)}
E           echo 2 ${x%%*\(\)}
E           echo 3 ${x#*\(\)}
E           echo 4 ${x##*\(\)}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::Nix idiom ${!hooksSlice+"${!hooksSlice}"} - was workaround for obsolete bash 4.3 bug[L263]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094188f0>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='Nix idiom ${!hooksSlice+"${!hooksSlice}"} - was workaround for obsolete bash 4.3 bug', script='case $SH...t=None), Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh'], variant='OK')], line_number=263, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Nix idiom ${!hooksSlice+"${!hooksSlice}"} - was workaround for obsolete bash 4.3 bug (line 263)
E           
E           stdout mismatch:
E             expected: "[]\n[]\n[]\n['42']"
E             actual:   '[]\n[]\n[]\n[]'
E           
E           Expected stdout: "[]\n[]\n[]\n['42']"
E           Actual stdout:   '[]\n[]\n[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh|zsh) exit ;; esac
E           
E           # https://oilshell.zulipchat.com/#narrow/stream/307442-nix/topic/Replacing.20bash.20with.20osh.20in.20Nixpkgs.20stdenv
E           
E           (argv.py ${!hooksSlice+"${!hooksSlice}"})
E           
E           hooksSlice=x
E           
E           argv.py ${!hooksSlice+"${!hooksSlice}"}
E           
E           declare -a hookSlice=()
E           
E           argv.py ${!hooksSlice+"${!hooksSlice}"}
E           
E           foo=42
E           bar=43
E           
E           declare -a hooksSlice=(foo bar spam eggs)
E           
E           argv.py ${!hooksSlice+"${!hooksSlice}"}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::array and - and +[L311]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109418a70>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='array and - and +', script='case $SH in dash) exit ;; esac\n\nshopt -s compat_array  # to refer to arra...['zsh'], variant='N-I'), Assertion(type='status', value=1, shells=['zsh'], variant='N-I')], line_number=311, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array and - and + (line 311)
E           
E           stdout mismatch:
E             expected: "empty=minus\na1=\na1[0]=\na2= x\na3=3 4\n---\nempty=\na1=plus\na1[0]=plus\na2=plus\na3=plus\n---\nempty=\na1=plus\na2=plus\na3=plus\n---\n['minus']\n[]\n['']\n['plus']\n['']\n['plus']\n['', 'x']\n['plus']\n['3', '4']\n['plus']"
E             actual:   "empty=minus\na1=\na1[0]=minus\na2= x\na3=3 4\n---\nempty=\na1=plus\na1[0]=\na2=plus\na3=plus\n---\nempty=plus\na1=plus\na2=plus\na3=plus\n---\n['minus']\n['']\n['']\n['plus']\n['minus']\n['']\n[' x']\n['plus']\n['3 4']\n['plus']"
E           
E           Expected stdout: "empty=minus\na1=\na1[0]=\na2= x\na3=3 4\n---\nempty=\na1=plus\na1[0]=plus\na2=plus\na3=plus\n---\nempty=\na1=plus\na2=plus\na3=plus\n---\n['minus']\n[]\n['']\n['plus']\n['']\n['plus']\n['', 'x']\n['plus']\n['3', '4']\n['plus']"
E           Actual stdout:   "empty=minus\na1=\na1[0]=minus\na2= x\na3=3 4\n---\nempty=\na1=plus\na1[0]=\na2=plus\na3=plus\n---\nempty=plus\na1=plus\na2=plus\na3=plus\n---\n['minus']\n['']\n['']\n['plus']\n['minus']\n['']\n[' x']\n['plus']\n['3 4']\n['plus']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           shopt -s compat_array  # to refer to array as scalar
E           
E           empty=()
E           a1=('')
E           a2=('' x)
E           a3=(3 4)
E           echo empty=${empty[@]-minus}
E           echo a1=${a1[@]-minus}
E           echo a1[0]=${a1[0]-minus}
E           echo a2=${a2[@]-minus}
E           echo a3=${a3[@]-minus}
E           echo ---
E           
E           echo empty=${empty[@]+plus}
E           echo a1=${a1[@]+plus}
E           echo a1[0]=${a1[0]+plus}
E           echo a2=${a2[@]+plus}
E           echo a3=${a3[@]+plus}
E           echo ---
E           
E           echo empty=${empty+plus}
E           echo a1=${a1+plus}
E           echo a2=${a2+plus}
E           echo a3=${a3+plus}
E           echo ---
E           
E           # Test quoted arrays too
E           argv.py "${empty[@]-minus}"
E           argv.py "${empty[@]+plus}"
E           argv.py "${a1[@]-minus}"
E           argv.py "${a1[@]+plus}"
E           argv.py "${a1[0]-minus}"
E           argv.py "${a1[0]+plus}"
E           argv.py "${a2[@]-minus}"
E           argv.py "${a2[@]+plus}"
E           argv.py "${a3[@]-minus}"
E           argv.py "${a3[@]+plus}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::$@ ("") and - and +[L407]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109418bf0>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='$@ ("") and - and +', script='set -- ""\necho argv=${@-minus}\necho argv=${@+plus}\necho argv=${@:-minu...(type='stdout', value='argv=\nargv=plus\nargv=\nargv=plus', shells=['zsh'], variant='OK')], line_number=407, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $@ ("") and - and + (line 407)
E           
E           stdout mismatch:
E             expected: 'argv=\nargv=plus\nargv=minus\nargv='
E             actual:   'argv=minus\nargv=\nargv=minus\nargv='
E           
E           Expected stdout: 'argv=\nargv=plus\nargv=minus\nargv='
E           Actual stdout:   'argv=minus\nargv=\nargv=minus\nargv=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- ""
E           echo argv=${@-minus}
E           echo argv=${@+plus}
E           echo argv=${@:-minus}
E           echo argv=${@:+plus}
E           
E           # Zsh treats $@ as an array unlike Bash converting it to a string by joining it
E           # with a space.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::$@ ("" "") and - and +[L430]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109418cb0>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='$@ ("" "") and - and +', script='set -- "" ""\necho argv=${@-minus}\necho argv=${@+plus}\necho argv=${@...ion(type='stdout', value='argv=\nargv=plus\nargv=\nargv=plus', shells=None, variant=None)], line_number=430, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $@ ("" "") and - and + (line 430)
E           
E           stdout mismatch:
E             expected: 'argv=\nargv=plus\nargv=\nargv=plus'
E             actual:   'argv=minus\nargv=\nargv=minus\nargv='
E           
E           Expected stdout: 'argv=\nargv=plus\nargv=\nargv=plus'
E           Actual stdout:   'argv=minus\nargv=\nargv=minus\nargv=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- "" ""
E           echo argv=${@-minus}
E           echo argv=${@+plus}
E           echo argv=${@:-minus}
E           echo argv=${@:+plus}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::$* ("" "") and - and + (IFS=)[L443]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109418d70>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='$* ("" "") and - and + (IFS=)', script='set -- "" ""\nIFS=\necho argv=${*-minus}\necho argv=${*+plus}\n...pe='stdout', value='argv=\nargv=plus\nargv=minus\nargv=', shells=['mksh'], variant='BUG')], line_number=443, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $* ("" "") and - and + (IFS=) (line 443)
E           
E           stdout mismatch:
E             expected: 'argv=\nargv=plus\nargv=\nargv=plus'
E             actual:   'argv=minus\nargv=\nargv=minus\nargv='
E           
E           Expected stdout: 'argv=\nargv=plus\nargv=\nargv=plus'
E           Actual stdout:   'argv=minus\nargv=\nargv=minus\nargv=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- "" ""
E           IFS=
E           echo argv=${*-minus}
E           echo argv=${*+plus}
E           echo argv=${*:-minus}
E           echo argv=${*:+plus}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::"$*" ("" "") and - and + (IFS=)[L463]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109418e30>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='"$*" ("" "") and - and + (IFS=)', script='set -- "" ""\nIFS=\necho "argv=${*-minus}"\necho "argv=${*+pl...ion(type='stdout', value='argv=\nargv=plus\nargv=\nargv=plus', shells=None, variant=None)], line_number=463, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: "$*" ("" "") and - and + (IFS=) (line 463)
E           
E           stdout mismatch:
E             expected: 'argv=\nargv=plus\nargv=\nargv=plus'
E             actual:   'argv=minus\nargv=\nargv=minus\nargv='
E           
E           Expected stdout: 'argv=\nargv=plus\nargv=\nargv=plus'
E           Actual stdout:   'argv=minus\nargv=\nargv=minus\nargv=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- "" ""
E           IFS=
E           echo "argv=${*-minus}"
E           echo "argv=${*+plus}"
E           echo "argv=${*:-minus}"
E           echo "argv=${*:+plus}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::array ${arr[0]=x}[L555]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094192b0>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='array ${arr[0]=x}', script='arr=()\necho ${#arr[@]}\n: ${arr[0]=x}\necho ${#arr[@]}', assertions=[Asser...zsh'], variant='N-I'), Assertion(type='stdout', value='0', shells=['zsh'], variant='N-I')], line_number=555, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array ${arr[0]=x} (line 555)
E           
E           stdout mismatch:
E             expected: '0\n1'
E             actual:   '0\n0'
E           
E           Expected stdout: '0\n1'
E           Actual stdout:   '0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           arr=()
E           echo ${#arr[@]}
E           : ${arr[0]=x}
E           echo ${#arr[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::assoc array ${arr["k"]=x}[L571]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109419370>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='assoc array ${arr["k"]=x}', script="# note: this also works in zsh\n\ndeclare -A arr=()\necho ${#arr[@]..., variant='N-I'), Assertion(type='stdout-json', value='', shells=['mksh'], variant='N-I')], line_number=571, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: assoc array ${arr["k"]=x} (line 571)
E           
E           stdout mismatch:
E             expected: '0\n1'
E             actual:   '0\n0'
E           
E           Expected stdout: '0\n1'
E           Actual stdout:   '0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # note: this also works in zsh
E           
E           declare -A arr=()
E           echo ${#arr[@]}
E           : ${arr['k']=x}
E           echo ${#arr[@]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[var-op-test.test.sh::"\\z" as arg[L587]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109419430>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='"\\z" as arg', script='echo "${undef-\\$}"\necho "${undef-\\(}"\necho "${undef-\\z}"\necho "${undef-\\"...one), Assertion(type='stdout', value='$\n(\nz\n"\n`\n\\', shells=['yash'], variant='BUG')], line_number=587, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: "\z" as arg (line 587)
E           
E           stdout mismatch:
E             expected: '$\n\\(\n\\z\n"\n`\n\\'
E             actual:   '$\n(\nz\n"\n`\n\\'
E           
E           Expected stdout: '$\n\\(\n\\z\n"\n`\n\\'
E           Actual stdout:   '$\n(\nz\n"\n`\n\\\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "${undef-\$}"
E           echo "${undef-\(}"
E           echo "${undef-\z}"
E           echo "${undef-\"}"
E           echo "${undef-\`}"
E           echo "${undef-\\}"
E           # Note: this line terminates the quoting by ` not to confuse the text editor.
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[var-op-test.test.sh::"\\e" as arg[L613]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094194f0>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='"\\e" as arg', script='echo "${undef-\\e}"', assertions=[Assertion(type='stdout', value='\\e', shells=N...sh'], variant='BUG'), Assertion(type='stdout', value='e', shells=['yash'], variant='BUG')], line_number=613, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: "\e" as arg (line 613)
E           
E           stdout mismatch:
E             expected: '\\e'
E             actual:   'e'
E           
E           Expected stdout: '\\e'
E           Actual stdout:   'e\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "${undef-\e}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-op-test.test.sh::op-test for ${a} and ${a[0]}[L622]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094195b0>
test_file = 'var-op-test.test.sh'
test_case = TestCase(name='op-test for ${a} and ${a[0]}', script='case $SH in dash) exit ;; esac\n\ntest-hyphen() {\n  echo "a   :...ariant=None), Assertion(type='stdout', value="['empty']", shells=['mksh'], variant='BUG')], line_number=622, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: op-test for ${a} and ${a[0]} (line 622)
E           
E           stdout mismatch:
E             expected: "ref=a[@]: 'no-colon' 'with-colon'\nref=a[*]: 'no-colon' 'with-colon'\nref=a[@]: '' ''\nref=a[*]: '' ''\nref=a[@]: ' ' ' '\nref=a[*]: ' ' ' '\nref=a[@]: ' ' ' '\nref=a[*]: '' ''"
E             actual:   "a   : '' 'with-colon'\na[0]: 'no-colon' 'with-colon'\na   : '' 'with-colon'\na[0]: 'no-colon' 'with-colon'\na   : '' 'with-colon'\na[0]: 'no-colon' 'with-colon'\na   : '' 'with-colon'\na[0]: 'no-colon' 'with-colon'"
E           
E           Expected stdout: "ref=a[@]: 'no-colon' 'with-colon'\nref=a[*]: 'no-colon' 'with-colon'\nref=a[@]: '' ''\nref=a[*]: '' ''\nref=a[@]: ' ' ' '\nref=a[*]: ' ' ' '\nref=a[@]: ' ' ' '\nref=a[*]: '' ''"
E           Actual stdout:   "a   : '' 'with-colon'\na[0]: 'no-colon' 'with-colon'\na   : '' 'with-colon'\na[0]: 'no-colon' 'with-colon'\na   : '' 'with-colon'\na[0]: 'no-colon' 'with-colon'\na   : '' 'with-colon'\na[0]: 'no-colon' 'with-colon'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           test-hyphen() {
E             echo "a   : '${a-no-colon}' '${a:-with-colon}'"
E             echo "a[0]: '${a[0]-no-colon}' '${a[0]:-with-colon}'"
E           }
E           
E           a=()
E           test-hyphen
E           a=("")
E           test-hyphen
E           a=("" "")
E           test-hyphen
E           IFS=
E           test-hyphen
E           
E           
E           # Zsh's ${a} and ${a[@]} implement something different from the other shells'.
E           
E           
E           
E           # Bash 2.0..4.4 has a bug that "${a[@]:-xxx}" produces an empty string.  It
E           # seemed to consider a[@] and a[*] are non-empty when there is at least one
E           # element even if the element is empty.  This was fixed in Bash 5.0.
E           #
E           # ## BUG bash STDOUT:
E           # a[@]: 'no-colon' 'with-colon'
E           # a[*]: 'no-colon' 'with-colon'
E           # a[@]: '' ''
E           # a[*]: '' ''
E           # a[@]: ' ' ' '
E           # a[*]: ' ' ' '
E           # a[@]: ' ' ' '
E           # a[*]: '' ''
E           # ## END
E           
E           # Zsh's ${a} and ${a[@]} implement something different from the other shells'.
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[var-ref.test.sh::${!ref-default}[L17]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109419730>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='${!ref-default}', script="ref=x\necho x=${!ref-default}\n\nx=''\necho x=${!ref-default}\n\nx=foo\necho ...ertions=[Assertion(type='stdout', value='x=default\nx=\nx=foo', shells=None, variant=None)], line_number=17, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!ref-default} (line 17)
E           
E           stdout mismatch:
E             expected: 'x=default\nx=\nx=foo'
E             actual:   'x=\nx=\nx='
E           
E           Expected stdout: 'x=default\nx=\nx=foo'
E           Actual stdout:   'x=\nx=\nx=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           ref=x
E           echo x=${!ref-default}
E           
E           x=''
E           echo x=${!ref-default}
E           
E           x=foo
E           echo x=${!ref-default}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[var-ref.test.sh::${!undef:-}[L33]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094197f0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='${!undef:-}', script="# bash 4.4 gives empty string, but I feel like this could be an error\necho undef...=None, variant=None), Assertion(type='stdout', value='NOUNSET', shells=None, variant=None)], line_number=33, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!undef:-} (line 33)
E           
E           stdout mismatch:
E             expected: 'NOUNSET'
E             actual:   'undef=\nundef=\nNOUNSET\nundef=\nundef='
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: 'NOUNSET'
E           Actual stdout:   'undef=\nundef=\nNOUNSET\nundef=\nundef=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash 4.4 gives empty string, but I feel like this could be an error
E           echo undef=${!undef-'default'}
E           echo undef=${!undef}
E           
E           set -u
E           echo NOUNSET
E           echo undef=${!undef-'default'}
E           echo undef=${!undef}
E           
E           
E           # Bash 4.4 had been generating an empty string, but it was fixed in Bash 5.0.
E           #
E           # ## BUG bash STDOUT:
E           # undef=default
E           # undef=
E           # NOUNSET
E           # undef=default
E           # ## END
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::${!a[@]-'default'} is legal but fails with more than one element[L98]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109419970>
test_file = 'var-ref.test.sh'
test_case = TestCase(name="${!a[@]-'default'} is legal but fails with more than one element", script='# bash allows this construct...ant=None), Assertion(type='stdout', value='status=1\nstatus=1', shells=None, variant=None)], line_number=98, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!a[@]-'default'} is legal but fails with more than one element (line 98)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   "['']\nstatus=0\n['']\nstatus=0"
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   "['']\nstatus=0\n['']\nstatus=0\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash allows this construct, but the indirection fails when the array has more
E           # than one element because the variable name contains a space.  OSH originally
E           # made it an error unconditionally because [@] implies it's an array, so the
E           # behavior has been different from Bash when the array has a single element.
E           # We now changed it to follow Bash even when the array has a single element.
E           
E           (argv.py "${!a[@]-default}")
E           echo status=$?
E           
E           a=(x y z)
E           (argv.py "${!a[@]-default}")
E           echo status=$?
E           
E           # Bash 4.4 had been generating an empty string for ${!undef[@]-}, but this was
E           # fixed in Bash 5.0.
E           #
E           # ## BUG bash status: 0
E           # ## BUG bash STDOUT:
E           # ['default']
E           # status=0
E           # status=1
E           # ## END
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[var-ref.test.sh::var ref: 1, @, *[L149]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109419bb0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='var ref: 1, @, *', script='set -- x y\nref=1; argv.py "${!ref}"\nref=@; argv.py "${!ref}"\nref=*; argv....=[Assertion(type='stdout', value="['x']\n['x', 'y']\n['x y']", shells=None, variant=None)], line_number=149, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: var ref: 1, @, * (line 149)
E           
E           stdout mismatch:
E             expected: "['x']\n['x', 'y']\n['x y']"
E             actual:   "['x']\n['x y']\n['x y']"
E           
E           Expected stdout: "['x']\n['x', 'y']\n['x y']"
E           Actual stdout:   "['x']\n['x y']\n['x y']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- x y
E           ref=1; argv.py "${!ref}"
E           ref=@; argv.py "${!ref}"
E           ref=*; argv.py "${!ref}"  # maybe_decay_array bug?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Var ref, then assignment with ${ := }[L181]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109419df0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Var ref, then assignment with ${ := }', script='z=zz\nzz=\necho ${!z:=foo}\necho ${!z:=bar}', assertions=[Assertion(type='stdout', value='foo\nfoo', shells=None, variant=None)], line_number=181, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Var ref, then assignment with ${ := } (line 181)
E           
E           stdout mismatch:
E             expected: 'foo\nfoo'
E             actual:   ''
E           
E           Expected stdout: 'foo\nfoo'
E           Actual stdout:   '\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           z=zz
E           zz=
E           echo ${!z:=foo}
E           echo ${!z:=bar}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Var ref, then error with ${ ? }[L191]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109419eb0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Var ref, then error with ${ ? }', script="w=ww\nww=\necho ${!w:?'my message'}\necho done", assertions=[...shells=None, variant=None), Assertion(type='stdout', value='', shells=None, variant=None)], line_number=191, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Var ref, then error with ${ ? } (line 191)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '\ndone'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '\ndone\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           w=ww
E           ww=
E           echo ${!w:?'my message'}
E           echo done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Indirect expansion, THEN suffix operators[L200]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109419f70>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Indirect expansion, THEN suffix operators', script='check_eq() {\n  [ "$1" = "$2" ] || { echo "$1 vs $2...\'\necho ok', assertions=[Assertion(type='stdout', value='ok', shells=None, variant=None)], line_number=200, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Indirect expansion, THEN suffix operators (line 200)
E           
E           stdout mismatch:
E             expected: 'ok'
E             actual:   'bad success: ${!!xd}\nbad success: ${!!x*}\nbad success: ${!!a[*]}\nbad success: ${!#x}\nbad success: ${!#a[@]}\n${!y:-foo} -> expected foo, got\n${!x:-foo} -> expected aaabcc, got\n${!x:?oops} -> expected aaabcc, got\n${!x:+foo} -> expected foo, got\n${!x:2} -> expected abcc, got\n${!x:2:2} -> expected ab, got\n${!x#*a} -> expected aabcc, got\n${!x%%c*} -> expected aaab, got\n${!x/a*b/d} -> expected dcc, got\n${!p@P} -> expected $ , got\nok'
E           
E           Expected stdout: 'ok'
E           Actual stdout:   'bad success: ${!!xd}\nbad success: ${!!x*}\nbad success: ${!!a[*]}\nbad success: ${!#x}\nbad success: ${!#a[@]}\n${!y:-foo} -> expected foo, got \n${!x:-foo} -> expected aaabcc, got \n${!x:?oops} -> expected aaabcc, got \n${!x:+foo} -> expected foo, got \n${!x:2} -> expected abcc, got \n${!x:2:2} -> expected ab, got \n${!x#*a} -> expected aabcc, got \n${!x%%c*} -> expected aaab, got \n${!x/a*b/d} -> expected dcc, got \n${!p@P} -> expected $ , got \nok\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           check_eq() {
E             [ "$1" = "$2" ] || { echo "$1 vs $2"; }
E           }
E           check_expand() {
E             val=$(eval "echo \"$1\"")
E             [ "$val" = "$2" ] || { echo "$1 -> expected $2, got $val"; }
E           }
E           check_err() {
E             e="$1"
E             msg=$(eval "$e" 2>&1) && echo "bad success: $e"
E             if test -n "$2"; then 
E               if [[ "$msg" != $2 ]]; then
E                 echo "Expected error: $e"
E                 echo "Got error     : $msg"
E               fi
E             fi
E           }
E           # Nearly everything in manual section 3.5.3 "Shell Parameter Expansion"
E           # is allowed after a !-indirection.
E           #
E           # Not allowed: any further prefix syntax.
E           x=xx; xx=aaabcc
E           xd=x
E           check_err '${!!xd}'
E           check_err '${!!x*}'
E           a=(asdf x)
E           check_err '${!!a[*]}'
E           check_err '${!#x}'
E           check_err '${!#a[@]}'
E           # And an array reference binds tighter in the syntax, so goes first;
E           # there's no way to spell "indirection, then array reference".
E           check_expand '${!a[1]}' xx
E           b=(aoeu a)
E           check_expand '${!b[1]}' asdf  # i.e. like !(b[1]), not (!b)[1]
E           #
E           # Allowed: apparently everything else.
E           y=yy; yy=
E           check_expand '${!y:-foo}' foo
E           check_expand '${!x:-foo}' aaabcc
E           
E           check_expand '${!x:?oops}' aaabcc
E           
E           check_expand '${!y:+foo}' ''
E           check_expand '${!x:+foo}' foo
E           
E           check_expand '${!x:2}' abcc
E           check_expand '${!x:2:2}' ab
E           
E           check_expand '${!x#*a}' aabcc
E           check_expand '${!x%%c*}' aaab
E           check_expand '${!x/a*b/d}' dcc
E           
E           # ^ operator not fully implemented in OSH
E           #check_expand '${!x^a}' Aaabcc
E           
E           p=pp; pp='\$ '
E           check_expand '${!p@P}' '$ '
E           echo ok
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::var ref TO array var, with subscripts[L313]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941a330>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='var ref TO array var, with subscripts', script='f() {\n  argv.py "${!1}"\n}\nf \'nonexistent[0]\'\narra...dout', value="['']\n['x']\n['z']\n['x', 'y', 'z']\n['x y z']", shells=None, variant=None)], line_number=313, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: var ref TO array var, with subscripts (line 313)
E           
E           stdout mismatch:
E             expected: "['']\n['x']\n['z']\n['x', 'y', 'z']\n['x y z']"
E             actual:   "['']\n['x']\n['z']\n['x y z']\n['x y z']"
E           
E           Expected stdout: "['']\n['x']\n['z']\n['x', 'y', 'z']\n['x y z']"
E           Actual stdout:   "['']\n['x']\n['z']\n['x y z']\n['x y z']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             argv.py "${!1}"
E           }
E           f 'nonexistent[0]'
E           array=(x y z)
E           f 'array[0]'
E           f 'array[1+1]'
E           f 'array[@]'
E           f 'array[*]'
E           # Also associative arrays.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::var ref TO assoc array a[key][L332]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941a3f0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='var ref TO assoc array a[key]', script='shopt -s compat_array\n\ndeclare -A assoc=([ale]=bean [corn]=di...value='ref=\nref_SUB=bean\nref_SUB_QUOTED=bean\nref_SUB_BAD=', shells=None, variant=None)], line_number=332, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: var ref TO assoc array a[key] (line 332)
E           
E           stdout mismatch:
E             expected: 'ref=\nref_SUB=bean\nref_SUB_QUOTED=bean\nref_SUB_BAD='
E             actual:   'ref=\nref_SUB=\nref_SUB_QUOTED=\nref_SUB_BAD='
E           
E           Expected stdout: 'ref=\nref_SUB=bean\nref_SUB_QUOTED=bean\nref_SUB_BAD='
E           Actual stdout:   'ref=\nref_SUB=\nref_SUB_QUOTED=\nref_SUB_BAD=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s compat_array
E           
E           declare -A assoc=([ale]=bean [corn]=dip)
E           ref=assoc
E           #ref_AT='assoc[@]'
E           
E           # UNQUOTED doesn't work with the OSH parser
E           #ref_SUB='assoc[ale]'
E           ref_SUB='assoc["ale"]'
E           
E           ref_SUB_QUOTED='assoc["al"e]'
E           
E           ref_SUB_BAD='assoc["bad"]'
E           
E           echo ref=${!ref}  # compat_array: assoc is equivalent to assoc[0]
E           #echo ref_AT=${!ref_AT}
E           echo ref_SUB=${!ref_SUB}
E           echo ref_SUB_QUOTED=${!ref_SUB_QUOTED}
E           echo ref_SUB_BAD=${!ref_SUB_BAD}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::var ref TO array with arbitrary subscripts[L361]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941a4b0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='var ref TO array with arbitrary subscripts', script='shopt -s eval_unsafe_arith compat_array\n\nf() {\n...rks: a[${c:-1}]\nworks: a[$(echo 1)]\nworks: a[$(( 3 - 2 ))]', shells=None, variant=None)], line_number=361, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: var ref TO array with arbitrary subscripts (line 361)
E           
E           stdout mismatch:
E             expected: 'works: a[1]\nworks: a[$b]\nworks: a[${c:-1}]\nworks: a[$(echo 1)]\nworks: a[$(( 3 - 2 ))]'
E             actual:   'works: a[1]\nworks: a[$b]'
E           
E           Expected stdout: 'works: a[1]\nworks: a[$b]\nworks: a[${c:-1}]\nworks: a[$(echo 1)]\nworks: a[$(( 3 - 2 ))]'
E           Actual stdout:   'works: a[1]\nworks: a[$b]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s eval_unsafe_arith compat_array
E           
E           f() {
E             local val=$(echo "${!1}")
E             if test "$val" = y; then 
E               echo "works: $1"
E             fi
E           }
E           # Warmup: nice plain array reference
E           a=(x y)
E           f 'a[1]'
E           #
E           # Not allowed:
E           # no brace expansion
E           f 'a[{1,0}]'  # operand expected
E           # no process substitution (but see command substitution below!)
E           f 'a[<(echo x)]'  # operand expected
E           # TODO word splitting seems interesting
E           aa="1 0"
E           f 'a[$aa]'  # 1 0: syntax error in expression (error token is "0")
E           # no filename globbing
E           f 'a[b*]'  # operand expected
E           f 'a[1"]'  # bad substitution
E           #
E           # Allowed: most everything else in section 3.5 "Shell Expansions".
E           # shell parameter expansion
E           b=1
E           f 'a[$b]'
E           f 'a[${c:-1}]'
E           # (... and presumably most of the other features there)
E           # command substitution, yikes!
E           f 'a[$(echo 1)]'
E           # arithmetic expansion
E           f 'a[$(( 3 - 2 ))]'
E           
E           # All of these are undocumented and probably shouldn't exist,
E           # though it's always possible some will turn up in the wild and
E           # we'll end up implementing them.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Bizarre tilde expansion in array index[L410]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941a570>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Bizarre tilde expansion in array index', script="a=(x y)\nPWD=1\nref='a[~+]'\necho ${!ref}\n\n# Bash 4....n# y\n# ## END", assertions=[Assertion(type='status', value=1, shells=None, variant=None)], line_number=410, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Bizarre tilde expansion in array index (line 410)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   'x\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(x y)
E           PWD=1
E           ref='a[~+]'
E           echo ${!ref}
E           
E           # Bash 4.4 had a bug, which was fixed in Bash 5.0.
E           #
E           # ## BUG bash status: 0
E           # ## BUG bash STDOUT:
E           # y
E           # ## END
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Indirect expansion TO fancy expansion features bash disallows[L424]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941a630>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Indirect expansion TO fancy expansion features bash disallows', script='check_indir() {\n    result="${...ls=None, variant=None), Assertion(type='stdout', value='done', shells=None, variant=None)], line_number=424, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Indirect expansion TO fancy expansion features bash disallows (line 424)
E           
E           stdout mismatch:
E             expected: 'done'
E             actual:   '!x\n!a[0]\nx:-foo\nx:=foo\nx:?oops\nx:+yy\nx:0\nx:0:1\n!a@\n#x\nx/y/foo\nx@Q\ndone'
E           
E           Expected stdout: 'done'
E           Actual stdout:   '!x \n!a[0] \nx:-foo \nx:=foo \nx:?oops \nx:+yy \nx:0 \nx:0:1 \n!a@ \n#x \nx/y/foo \nx@Q \ndone\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           check_indir() {
E               result="${!1}"
E               desugared_result=$(eval 'echo "${'"$1"'}"')
E               [ "$2" = "$desugared_result" ] || { echo "$1 $desugared_result";  }
E           }
E           x=y
E           y=a
E           a=(x y)
E           declare -A aa
E           aa=([k]=r [l]=s)
E           # malformed array indexing
E           check_indir "a[0"
E           check_indir "aa[k"
E           # double indirection
E           check_indir "!x"      a
E           check_indir "!a[0]"   y
E           # apparently everything else in the manual under "Shell Parameter Expansion"
E           check_indir "x:-foo"  y
E           check_indir "x:=foo"  y
E           check_indir "x:?oops" y
E           check_indir "x:+yy"   yy
E           check_indir "x:0"     y
E           check_indir "x:0:1"   y
E           check_indir "!a@"    "a aa"
E           # (!a[@] is elsewhere)
E           check_indir "#x"      1
E           check_indir "x#y"
E           check_indir "x/y/foo" foo
E           check_indir "x@Q"     "'y'"
E           echo done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::${!OPTIND} (used by bash completion[L478]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941a870>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='${!OPTIND} (used by bash completion', script='set -- a b c\necho ${!OPTIND}\nf() {\n  local OPTIND=1\n ... x y z', assertions=[Assertion(type='stdout', value='a\nx\ny', shells=None, variant=None)], line_number=478, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!OPTIND} (used by bash completion (line 478)
E           
E           stdout mismatch:
E             expected: 'a\nx\ny'
E             actual:   '\nx\ny'
E           
E           Expected stdout: 'a\nx\ny'
E           Actual stdout:   '\nx\ny\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- a b c
E           echo ${!OPTIND}
E           f() {
E             local OPTIND=1
E             echo ${!OPTIND}
E             local OPTIND=2
E             echo ${!OPTIND}
E           }
E           f x y z
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Var Ref Code Injection $(tee PWNED)[L512]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941a9f0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Var Ref Code Injection $(tee PWNED)', script="typeset -a a\na=(42)\n\nx='a[$(echo 0 | tee PWNED)]'\n\ne...nt='BUG'), Assertion(type='stdout', value='42\nPWNED\n0', shells=['bash'], variant='BUG')], line_number=512, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Var Ref Code Injection $(tee PWNED) (line 512)
E           
E           stdout mismatch:
E             expected: '42\nPWNED\n0'
E             actual:   '42\nNOPE'
E           
E           Expected stdout: '42\nPWNED\n0'
E           Actual stdout:   '42\nNOPE\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -a a
E           a=(42)
E           
E           x='a[$(echo 0 | tee PWNED)]'
E           
E           echo ${!x}
E           
E           if test -f PWNED; then
E             echo PWNED
E             cat PWNED
E           else
E             echo NOPE
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::${!array_ref:-set} and ${!array_ref:=assign}[L540]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941aab0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='${!array_ref:-set} and ${!array_ref:=assign}', script='ref=\'a[@]\'\na=(\'\' \'\' \'\')\n\necho "==== c...====\n['', '', '']\n['', '', '']\n['', '', '']\n['', '', '']", shells=None, variant=None)], line_number=540, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ${!array_ref:-set} and ${!array_ref:=assign} (line 540)
E           
E           stdout mismatch:
E             expected: "==== check ====\n['', '', '']\n['', '', '']\n==== assign ====\n['', '', '']\n['', '', '']\n['', '', '']\n['', '', '']"
E             actual:   "==== check ====\n['']\n['  ']\n==== assign ====\n['']\n['  ']\n['  ']\n['', '', '']"
E           
E           Expected stdout: "==== check ====\n['', '', '']\n['', '', '']\n==== assign ====\n['', '', '']\n['', '', '']\n['', '', '']\n['', '', '']"
E           Actual stdout:   "==== check ====\n['']\n['  ']\n==== assign ====\n['']\n['  ']\n['  ']\n['', '', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           ref='a[@]'
E           a=('' '' '')
E           
E           echo "==== check ===="
E           
E           argv.py "${!ref:-set}"
E           argv.py "${a[@]:-set}"
E           
E           echo "==== assign ===="
E           
E           argv.py "${!ref:=assign}"
E           argv.py "${!ref}"
E           a=('' '' '') # revert the state in case it is modified
E           
E           argv.py "${a[@]:=assign}"
E           argv.py "${a[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Array indirect expansion with suffix operators[L570]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941ab70>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Array indirect expansion with suffix operators', script='declare -A ref=([\'dummy\']=v1)\nfunction test...\n['1', '2', '3']\n['set']\n['1', '2', '3']\n['1', '2', '3']", shells=None, variant=None)], line_number=570, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Array indirect expansion with suffix operators (line 570)
E           
E           stdout mismatch:
E             expected: "==== v1 ====\n['lue']\n['al']\n['value']\n['set']\n['value']\nv1=value\n==== v2 ====\n['']\n['']\n['empty']\n['']\n['assign']\nv2=assign\n==== a1 ====\n['']\n['']\n['empty']\n['']\n['assign']\n['assign']\n==== a2[0] ====\n['ement']\n['le']\n['element']\n['set']\n['element']\n['element']\n==== a3[@] ====\n['3']\n['2', '3']\n['1', '2', '3']\n['set']\n['1', '2', '3']\n['1', '2', '3']"
E             actual:   "==== v1 ====\n['']\n['']\n['']\n['']\n['']\nv1=value\n==== v2 ====\n['']\n['']\n['']\n['']\n['']\nv2=\n==== a1 ====\n['']\n['']\n['']\n['']\n['']\n[]\n==== a2[0] ====\n['']\n['']\n['']\n['']\n['']\n['element']\n==== a3[@] ====\n['']\n['']\n['']\n['']\n['']\n['1', '2', '3']"
E           
E           Expected stdout: "==== v1 ====\n['lue']\n['al']\n['value']\n['set']\n['value']\nv1=value\n==== v2 ====\n['']\n['']\n['empty']\n['']\n['assign']\nv2=assign\n==== a1 ====\n['']\n['']\n['empty']\n['']\n['assign']\n['assign']\n==== a2[0] ====\n['ement']\n['le']\n['element']\n['set']\n['element']\n['element']\n==== a3[@] ====\n['3']\n['2', '3']\n['1', '2', '3']\n['set']\n['1', '2', '3']\n['1', '2', '3']"
E           Actual stdout:   "==== v1 ====\n['']\n['']\n['']\n['']\n['']\nv1=value\n==== v2 ====\n['']\n['']\n['']\n['']\n['']\nv2=\n==== a1 ====\n['']\n['']\n['']\n['']\n['']\n[]\n==== a2[0] ====\n['']\n['']\n['']\n['']\n['']\n['element']\n==== a3[@] ====\n['']\n['']\n['']\n['']\n['']\n['1', '2', '3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A ref=(['dummy']=v1)
E           function test-suffixes {
E             echo "==== $1 ===="
E             ref['dummy']=$1
E             argv.py "${!ref[@]:2}"
E             argv.py "${!ref[@]:1:2}"
E             argv.py "${!ref[@]:-empty}"
E             argv.py "${!ref[@]:+set}"
E             argv.py "${!ref[@]:=assign}"
E           }
E           
E           v1=value
E           test-suffixes v1
E           echo "v1=$v1"
E           
E           v2=
E           test-suffixes v2
E           echo "v2=$v2"
E           
E           a1=()
E           test-suffixes a1
E           argv.py "${a1[@]}"
E           
E           a2=(element)
E           test-suffixes 'a2[0]'
E           argv.py "${a2[@]}"
E           
E           a3=(1 2 3)
E           test-suffixes 'a3[@]'
E           argv.py "${a3[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Array indirect expansion with replacements[L641]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941ac30>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Array indirect expansion with replacements', script='declare -A ref=([\'dummy\']=v1)\nfunction test-rep...['', '', '']\n['', '', '']\n['1', '2', '3']\n['1', '2', '3']", shells=None, variant=None)], line_number=641, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Array indirect expansion with replacements (line 641)
E           
E           stdout mismatch:
E             expected: "==== v1 ====\n['alue']\n['valu']\n['vlu']\n['vxlux']\n==== v2 ====\n['']\n['']\n['']\n['']\n==== a1 ====\n['']\n['']\n['']\n['']\n==== a2[0] ====\n['lement']\n['elemen']\n['lmnt']\n['xlxmxnt']\n==== a3[@] ====\n['', '', '']\n['', '', '']\n['1', '2', '3']\n['1', '2', '3']"
E             actual:   "==== v1 ====\n['']\n['']\n['']\n['']\n==== v2 ====\n['']\n['']\n['']\n['']\n==== a1 ====\n['']\n['']\n['']\n['']\n==== a2[0] ====\n['']\n['']\n['']\n['']\n==== a3[@] ====\n['']\n['']\n['']\n['']"
E           
E           Expected stdout: "==== v1 ====\n['alue']\n['valu']\n['vlu']\n['vxlux']\n==== v2 ====\n['']\n['']\n['']\n['']\n==== a1 ====\n['']\n['']\n['']\n['']\n==== a2[0] ====\n['lement']\n['elemen']\n['lmnt']\n['xlxmxnt']\n==== a3[@] ====\n['', '', '']\n['', '', '']\n['1', '2', '3']\n['1', '2', '3']"
E           Actual stdout:   "==== v1 ====\n['']\n['']\n['']\n['']\n==== v2 ====\n['']\n['']\n['']\n['']\n==== a1 ====\n['']\n['']\n['']\n['']\n==== a2[0] ====\n['']\n['']\n['']\n['']\n==== a3[@] ====\n['']\n['']\n['']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A ref=(['dummy']=v1)
E           function test-rep {
E             echo "==== $1 ===="
E             ref['dummy']=$1
E             argv.py "${!ref[@]#?}"
E             argv.py "${!ref[@]%?}"
E             argv.py "${!ref[@]//[a-f]}"
E             argv.py "${!ref[@]//[a-f]/x}"
E           }
E           
E           v1=value
E           test-rep v1
E           
E           v2=
E           test-rep v2
E           
E           a1=()
E           test-rep a1
E           
E           a2=(element)
E           test-rep 'a2[0]'
E           
E           a3=(1 2 3)
E           test-rep 'a3[@]'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-ref.test.sh::Array indirect expansion with @? conversion[L696]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941acf0>
test_file = 'var-ref.test.sh'
test_case = TestCase(name='Array indirect expansion with @? conversion', script='declare -A ref=([\'dummy\']=v1)\nfunction test-op..., "\'3\'"]\n[\'1\', \'2\', \'3\']\n[\'a\', \'a\', \'a\']', shells=['bash'], variant='OK')], line_number=696, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Array indirect expansion with @? conversion (line 696)
E           
E           stdout mismatch:
E             expected: '==== v1 ====\n["\'value\'"]\n[\'value\']\n[\'\']\n==== v2 ====\n["\'\'"]\n[\'\']\n[\'\']\n==== a1 ====\n[\'\']\n[\'\']\n[\'a\']\n==== a2[0] ====\n["\'element\'"]\n[\'element\']\n[\'a\']\n==== a3[@] ====\n["\'1\'", "\'2\'", "\'3\'"]\n[\'1\', \'2\', \'3\']\n[\'a\', \'a\', \'a\']'
E             actual:   "==== v1 ====\n['']\n['']\n['']\n==== v2 ====\n['']\n['']\n['']\n==== a1 ====\n['']\n['']\n['']\n==== a2[0] ====\n['']\n['']\n['']\n==== a3[@] ====\n['']\n['']\n['']"
E           
E           Expected stdout: '==== v1 ====\n["\'value\'"]\n[\'value\']\n[\'\']\n==== v2 ====\n["\'\'"]\n[\'\']\n[\'\']\n==== a1 ====\n[\'\']\n[\'\']\n[\'a\']\n==== a2[0] ====\n["\'element\'"]\n[\'element\']\n[\'a\']\n==== a3[@] ====\n["\'1\'", "\'2\'", "\'3\'"]\n[\'1\', \'2\', \'3\']\n[\'a\', \'a\', \'a\']'
E           Actual stdout:   "==== v1 ====\n['']\n['']\n['']\n==== v2 ====\n['']\n['']\n['']\n==== a1 ====\n['']\n['']\n['']\n==== a2[0] ====\n['']\n['']\n['']\n==== a3[@] ====\n['']\n['']\n['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A ref=(['dummy']=v1)
E           function test-op0 {
E             echo "==== $1 ===="
E             ref['dummy']=$1
E             argv.py "${!ref[@]@Q}"
E             argv.py "${!ref[@]@P}"
E             argv.py "${!ref[@]@a}"
E           }
E           
E           v1=value
E           test-op0 v1
E           
E           v2=
E           test-op0 v2
E           
E           a1=()
E           test-op0 a1
E           
E           a2=(element)
E           test-op0 'a2[0]'
E           
E           a3=(1 2 3)
E           test-op0 'a3[@]'
E           
E           
E           # Bash 4.4 had a bug in the section "==== a3[@] ====":
E           #
E           # ==== a3[@] ====
E           # []
E           # []
E           # []
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::array with empty values[L27]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941af30>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='array with empty values', script='declare -a A=(\'\' x "" \'\')\nargv.py "${A[@]}"', assertions=[Assert...'mksh'], variant='N-I'), Assertion(type='status', value=1, shells=['mksh'], variant='N-I')], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: array with empty values (line 27)
E           
E           stdout mismatch:
E             expected: "['', 'x', '', '']"
E             actual:   "['x']"
E           
E           Expected stdout: "['', 'x', '', '']"
E           Actual stdout:   "['x']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a A=('' x "" '')
E           argv.py "${A[@]}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Inner single quotes, outer double quotes[L61]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941b2f0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Inner single quotes, outer double quotes', script='# This is the WEIRD ONE.  Single quotes appear outsi...b\'}"', assertions=[Assertion(type='stdout', value='["\'b\'"]', shells=None, variant=None)], line_number=61, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Inner single quotes, outer double quotes (line 61)
E           
E           stdout mismatch:
E             expected: '["\'b\'"]'
E             actual:   "['b']"
E           
E           Expected stdout: '["\'b\'"]'
E           Actual stdout:   "['b']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is the WEIRD ONE.  Single quotes appear outside.  But all shells agree!
E           argv.py "${Unset:-'b'}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Multiple words: no outer quotes, inner single quotes[L78]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941b5f0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Multiple words: no outer quotes, inner single quotes', script="argv.py ${Unset:-'a b c'}", assertions=[Assertion(type='stdout', value="['a b c']", shells=None, variant=None)], line_number=78, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple words: no outer quotes, inner single quotes (line 78)
E           
E           stdout mismatch:
E             expected: "['a b c']"
E             actual:   "['a', 'b', 'c']"
E           
E           Expected stdout: "['a b c']"
E           Actual stdout:   "['a', 'b', 'c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py ${Unset:-'a b c'}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Multiple words: no outer quotes, inner double quotes[L82]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941b6b0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Multiple words: no outer quotes, inner double quotes', script='argv.py ${Unset:-"a b c"}', assertions=[Assertion(type='stdout', value="['a b c']", shells=None, variant=None)], line_number=82, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple words: no outer quotes, inner double quotes (line 82)
E           
E           stdout mismatch:
E             expected: "['a b c']"
E             actual:   "['a', 'b', 'c']"
E           
E           Expected stdout: "['a b c']"
E           Actual stdout:   "['a', 'b', 'c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py ${Unset:-"a b c"}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Multiple words: outer double quotes, inner single quotes[L94]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941b8f0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Multiple words: outer double quotes, inner single quotes', script='argv.py "${Unset:-\'a b c\'}"\n# WEI....', assertions=[Assertion(type='stdout', value='["\'a b c\'"]', shells=None, variant=None)], line_number=94, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple words: outer double quotes, inner single quotes (line 94)
E           
E           stdout mismatch:
E             expected: '["\'a b c\'"]'
E             actual:   "['a b c']"
E           
E           Expected stdout: '["\'a b c\'"]'
E           Actual stdout:   "['a b c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py "${Unset:-'a b c'}"
E           # WEIRD ONE.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Mixed inner quotes[L99]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941b9b0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Mixed inner quotes', script='argv.py ${Unset:-"a b" c}', assertions=[Assertion(type='stdout', value="['a b', 'c']", shells=None, variant=None)], line_number=99, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Mixed inner quotes (line 99)
E           
E           stdout mismatch:
E             expected: "['a b', 'c']"
E             actual:   "['a', 'b', 'c']"
E           
E           Expected stdout: "['a b', 'c']"
E           Actual stdout:   "['a', 'b', 'c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py ${Unset:-"a b" c}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::part_value tree with multiple words[L107]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941bb30>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='part_value tree with multiple words', script='argv.py ${a:-${a:-"1 2" "3 4"}5 "6 7"}', assertions=[Assertion(type='stdout', value="['1 2', '3 45', '6 7']", shells=None, variant=None)], line_number=107, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: part_value tree with multiple words (line 107)
E           
E           stdout mismatch:
E             expected: "['1 2', '3 45', '6 7']"
E             actual:   "['1', '2', '3', '45', '6', '7']"
E           
E           Expected stdout: "['1 2', '3 45', '6 7']"
E           Actual stdout:   "['1', '2', '3', '45', '6', '7']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py ${a:-${a:-"1 2" "3 4"}5 "6 7"}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Multiple words: no outer quotes, inner double quotes[L127]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10941be30>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Multiple words: no outer quotes, inner double quotes', script='var=\'a b c\'\nargv.py ${Unset:-"$var"}', assertions=[Assertion(type='stdout', value="['a b c']", shells=None, variant=None)], line_number=127, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple words: no outer quotes, inner double quotes (line 127)
E           
E           stdout mismatch:
E             expected: "['a b c']"
E             actual:   "['a', 'b', 'c']"
E           
E           Expected stdout: "['a b c']"
E           Actual stdout:   "['a', 'b', 'c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           var='a b c'
E           argv.py ${Unset:-"$var"}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Multiple words: outer double quotes, inner single quotes[L142]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094440b0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Multiple words: outer double quotes, inner single quotes', script='# WEIRD ONE.\n#\n# I think I should ...', assertions=[Assertion(type='stdout', value='["\'a b c\'"]', shells=None, variant=None)], line_number=142, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Multiple words: outer double quotes, inner single quotes (line 142)
E           
E           stdout mismatch:
E             expected: '["\'a b c\'"]'
E             actual:   "['$var']"
E           
E           Expected stdout: '["\'a b c\'"]'
E           Actual stdout:   "['$var']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # WEIRD ONE.
E           #
E           # I think I should just disallow any word with single quotes inside double
E           # quotes.
E           var='a b c'
E           argv.py "${Unset:-'$var'}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::No outer quotes, Multiple internal quotes[L151]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444170>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='No outer quotes, Multiple internal quotes', script='# It\'s like a single command word.  Parts are join...(type='stdout', value="['Aa', 'b', 'c', ' a b cD', 'E', 'F']", shells=None, variant=None)], line_number=151, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: No outer quotes, Multiple internal quotes (line 151)
E           
E           stdout mismatch:
E             expected: "['Aa', 'b', 'c', ' a b cD', 'E', 'F']"
E             actual:   "['Aa', 'b', 'c', 'a', 'b', 'cD', 'E', 'F']"
E           
E           Expected stdout: "['Aa', 'b', 'c', ' a b cD', 'E', 'F']"
E           Actual stdout:   "['Aa', 'b', 'c', 'a', 'b', 'cD', 'E', 'F']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # It's like a single command word.  Parts are joined directly.
E           var='a b c'
E           argv.py ${Unset:-A$var " $var"D E F}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Syntax error for single quote in double quote[L179]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444530>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Syntax error for single quote in double quote', script='foo="\'a b c d\'"\nargv.py "${foo%d\'}"', asser...lls=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=179, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Syntax error for single quote in double quote (line 179)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '["\'a b c d\'"]'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '["\'a b c d\'"]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           foo="'a b c d'"
E           argv.py "${foo%d'}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::"${undef-'c d'}" and "${foo%'c d'}" are parsed differently[L186]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094445f0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='"${undef-\'c d\'}" and "${foo%\'c d\'}" are parsed differently', script='# quotes are LITERAL here\narg...d\'"]\n[\'c d\', \'c  d\']\n---\n[\'a b \', \'a b c d\']', shells=['dash'], variant='OK')], line_number=186, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: "${undef-'c d'}" and "${foo%'c d'}" are parsed differently (line 186)
E           
E           stdout mismatch:
E             expected: '["\'c d\'", "\'c  d\'"]\n[\'c d\', \'c  d\']\n---\n[\'a b \', \'a b c d\']\n[\'a b zzz\', \'a b c d\']\n[\'a b zzz\', \'a b c d\']'
E             actual:   "['c d', 'c  d']\n['c', 'd', 'c', 'd']\n---\n['a b ', 'a b c d']\n['a b zzz', 'a b c d']\n['a b zzz', 'a b c d']"
E           
E           Expected stdout: '["\'c d\'", "\'c  d\'"]\n[\'c d\', \'c  d\']\n---\n[\'a b \', \'a b c d\']\n[\'a b zzz\', \'a b c d\']\n[\'a b zzz\', \'a b c d\']'
E           Actual stdout:   "['c d', 'c  d']\n['c', 'd', 'c', 'd']\n---\n['a b ', 'a b c d']\n['a b zzz', 'a b c d']\n['a b zzz', 'a b c d']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # quotes are LITERAL here
E           argv.py "${undef-'c d'}" "${undef-'c  d'}"
E           argv.py ${undef-'c d'} ${undef-'c  d'}
E           
E           echo ---
E           
E           # quotes are RESPECTED here
E           foo='a b c d'
E           argv.py "${foo%'c d'}" "${foo%'c  d'}"
E           
E           case $SH in dash) exit ;; esac
E           
E           argv.py "${foo//'c d'/zzz}" "${foo//'c  d'/zzz}"
E           argv.py "${foo//'c d'/'zzz'}" "${foo//'c  d'/'zzz'}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::single quotes work inside character classes[L269]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094448f0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='single quotes work inside character classes', script='x=\'a[[[---]]]b\'\necho "${x//[\'[]\']}"', assert...dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=269, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: single quotes work inside character classes (line 269)
E           
E           stdout mismatch:
E             expected: 'a---b'
E             actual:   'a[[[---]]]b'
E           
E           Expected stdout: 'a---b'
E           Actual stdout:   'a[[[---]]]b\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='a[[[---]]]b'
E           echo "${x//['[]']}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::comparison: :- operator with single quoted arg[L281]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094449b0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='comparison: :- operator with single quoted arg', script='echo ${unset:-\'a\'}\necho "${unset:-\'a\'}"', assertions=[Assertion(type='stdout', value="a\n'a'", shells=None, variant=None)], line_number=281, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: comparison: :- operator with single quoted arg (line 281)
E           
E           stdout mismatch:
E             expected: "a\n'a'"
E             actual:   'a\na'
E           
E           Expected stdout: "a\n'a'"
E           Actual stdout:   'a\na\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo ${unset:-'a'}
E           echo "${unset:-'a'}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Right Brace as argument (similar to #702)[L290]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444a70>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Right Brace as argument (similar to #702)', script='echo "${var-}}"\necho "${var-\\}}"\necho "${var-\'}...iant='BUG'), Assertion(type='stdout', value='}\n}\n}\n}', shells=['yash'], variant='BUG')], line_number=290, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Right Brace as argument (similar to #702) (line 290)
E           
E           stdout mismatch:
E             expected: "}\n}\n'}'\n}"
E             actual:   "}\n\\}\n'}\n}"
E           
E           Expected stdout: "}\n}\n'}'\n}"
E           Actual stdout:   "}\n\\}\n'}\n}\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "${var-}}"
E           echo "${var-\}}"
E           echo "${var-'}'}"
E           echo "${var-"}"}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Var substitution with newlines (#2492)[L315]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444b30>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Var substitution with newlines (#2492)', script='echo "${var-a \\\nb}"\necho "${var-a\nb}"\n\necho "${v...ertion(type='stdout', value='a b\na\nb\nc d\nc\nd\ne f\ne\nf', shells=None, variant=None)], line_number=315, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Var substitution with newlines (#2492) (line 315)
E           
E           stdout mismatch:
E             expected: 'a b\na\nb\nc d\nc\nd\ne f\ne\nf'
E             actual:   'a\nb\na\nb\nc\nd\nc\nd\ne\nf\ne\nf'
E           
E           Expected stdout: 'a b\na\nb\nc d\nc\nd\ne f\ne\nf'
E           Actual stdout:   'a \nb\na\nb\nc \nd\nc\nd\ne \nf\ne\nf\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "${var-a \
E           b}"
E           echo "${var-a
E           b}"
E           
E           echo "${var:-c \
E           d}"
E           echo "${var:-c
E           d}"
E           
E           var=set
E           echo "${var:+e \
E           f}"
E           echo "${var:+e
E           f}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub-quote.test.sh::Var substitution with \\n in value[L345]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444bf0>
test_file = 'var-sub-quote.test.sh'
test_case = TestCase(name='Var substitution with \\n in value', script='echo "${var-a\\nb}"\necho "${var:-c\\nd}"\nvar=val\necho "...ssertion(type='stdout', value='a\nb\nc\nd\ne\nf', shells=['dash', 'mksh'], variant='BUG')], line_number=345, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Var substitution with \n in value (line 345)
E           
E           stdout mismatch:
E             expected: 'a\\nb\nc\\nd\ne\\nf'
E             actual:   'anb\ncnd\nenf'
E           
E           Expected stdout: 'a\\nb\nc\\nd\ne\\nf'
E           Actual stdout:   'anb\ncnd\nenf\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "${var-a\nb}"
E           echo "${var:-c\nd}"
E           var=val
E           echo "${var:+e\nf}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
______ TestBashSpecTests.test_spec_case[var-sub.test.sh::Bad var sub[L8]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444cb0>
test_file = 'var-sub.test.sh'
test_case = TestCase(name='Bad var sub', script='echo ${a&}', assertions=[Assertion(type='stdout-json', value='', shells=None, var...ne, variant=None), Assertion(type='status', value=1, shells=['bash', 'mksh'], variant='OK')], line_number=8, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Bad var sub (line 8)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           echo ${a&}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub.test.sh::Braced block inside ${}[L14]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444d70>
test_file = 'var-sub.test.sh'
test_case = TestCase(name='Braced block inside ${}', script='# NOTE: This bug was in bash 4.3 but fixed in bash 4.4.\necho ${foo:-...ls; })}', assertions=[Assertion(type='stdout', value='/bin/ls', shells=None, variant=None)], line_number=14, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Braced block inside ${} (line 14)
E           
E           Execution error: Command substitution requires async expansion
E           
E           
E           Script:
E           ---
E           # NOTE: This bug was in bash 4.3 but fixed in bash 4.4.
E           echo ${foo:-$({ ls /bin/ls; })}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub.test.sh::Filename redirect with "$@"[L24]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444ef0>
test_file = 'var-sub.test.sh'
test_case = TestCase(name='Filename redirect with "$@"', script='# bash - ambiguous redirect -- yeah I want this error\n#   - But ...ells=None, variant=None), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=24, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Filename redirect with "$@" (line 24)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash - ambiguous redirect -- yeah I want this error
E           #   - But I want it at PARSE time?  So is there a special DollarAtPart?
E           #     MultipleArgsPart?
E           # mksh - tries to create '_tmp/var-sub1 _tmp/var-sub2'
E           # dash - tries to create '_tmp/var-sub1 _tmp/var-sub2'
E           fun() {
E             echo hi > "$@"
E           }
E           fun _tmp/var-sub1 _tmp/var-sub2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub.test.sh::Descriptor redirect to bad "$@"[L37]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109444fb0>
test_file = 'var-sub.test.sh'
test_case = TestCase(name='Descriptor redirect to bad "$@"', script='# All of them give errors:\n# dash - bad fd number, parse err...ells=None, variant=None), Assertion(type='status', value=2, shells=['dash'], variant='OK')], line_number=37, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Descriptor redirect to bad "$@" (line 37)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # All of them give errors:
E           # dash - bad fd number, parse error?
E           # bash - ambiguous redirect
E           # mksh - illegal file descriptor name
E           set -- '2 3' 'c d'
E           echo hi 1>& "$@"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[var-sub.test.sh::Here doc with bad "$@" delimiter[L47]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445070>
test_file = 'var-sub.test.sh'
test_case = TestCase(name='Here doc with bad "$@" delimiter', script='# bash - syntax error\n# dash - syntax error: end of file un...ells=None, variant=None), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=47, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Here doc with bad "$@" delimiter (line 47)
E           
E           Execution error: Expected '}' to close command group at line 14, column 1
E           
E           
E           Script:
E           ---
E           # bash - syntax error
E           # dash - syntax error: end of file unexpected
E           # mksh - runtime error: here document unclosed
E           #
E           # What I want is syntax error: bad delimiter!
E           #
E           # This means that "$@" should be part of the parse tree then?  Anything that
E           # involves more than one token.
E           fun() {
E             cat << "$@"
E           hi
E           1 2
E           }
E           fun 1 2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-bash.test.sh::$SHELL is set to what is in /etc/passwd[L4]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445130>
test_file = 'vars-bash.test.sh'
test_case = TestCase(name='$SHELL is set to what is in /etc/passwd', script='sh=$(which $SH)\n\nunset SHELL\n\nprog=\'\nif test -n...nt=None), Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh'], variant='N-I')], line_number=4, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $SHELL is set to what is in /etc/passwd (line 4)
E           
E           Execution error: Expected 'fi' to close if statement at line 1, column 26
E           
E           
E           Script:
E           ---
E           sh=$(which $SH)
E           
E           unset SHELL
E           
E           prog='
E           if test -n "$SHELL"; then
E             # the exact value is different on CI, so do not assert
E             echo SHELL is set
E             echo SHELL=$SHELL >&2
E           fi
E           '
E           
E           $SH -c "$prog"
E           
E           $SH -i -c "$prog"
E           
E           # make it a login shell
E           $SH -l -c "$prog"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$PATH is set if unset at startup[L31]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445370>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$PATH is set if unset at startup', script="# WORKAROUND for Python version of bin/osh -- we can't run b...es\nfi", assertions=[Assertion(type='stdout', value='yes\nyes', shells=None, variant=None)], line_number=31, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $PATH is set if unset at startup (line 31)
E           
E           stdout mismatch:
E             expected: 'yes\nyes'
E             actual:   ''
E           
E           Expected stdout: 'yes\nyes'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: /usr/bin/bash: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # WORKAROUND for Python version of bin/osh -- we can't run bin/oils_for_unix.py
E           # because it a shebang #!/usr/bin/env python2
E           # This test is still useful for the C++ oils-for-unix.
E           
E           case $SH in
E             */bin/osh)
E               echo yes
E               echo yes
E               exit
E               ;;
E           esac
E           
E           # Get absolute path before changing PATH
E           sh=$(which $SH)
E           
E           old_path=$PATH
E           unset PATH
E           
E           $sh -c 'echo $PATH' > path.txt
E           
E           PATH=$old_path
E           
E           # looks like PATH=/usr/bin:/bin for mksh, but more complicated for others
E           # cat path.txt
E           
E           # should contain /usr/bin
E           if egrep -q '(^|:)/usr/bin($|:)' path.txt; then
E             echo yes
E           fi
E           
E           # should contain /bin
E           if egrep -q '(^|:)/bin($|:)' path.txt ; then
E             echo yes
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$HOME is NOT set[L73]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445430>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$HOME is NOT set', script='case $SH in *zsh) echo \'zsh sets HOME\'; exit ;; esac\n\nhome=$(echo $HOME)...iant=None), Assertion(type='stdout', value='zsh sets HOME', shells=['zsh'], variant='BUG')], line_number=73, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $HOME is NOT set (line 73)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=1\nstatus=1'
E             actual:   'status=1\nHOME=/tmp\nstatus=0\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=1\nstatus=1'
E           Actual stdout:   'status=1\nHOME=/tmp\nstatus=0\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in *zsh) echo 'zsh sets HOME'; exit ;; esac
E           
E           home=$(echo $HOME)
E           test "$home" = ""
E           echo status=$?
E           
E           env | grep HOME
E           echo status=$?
E           
E           # not in interactive shell either
E           $SH -i -c 'echo $HOME' | grep /
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::Vars set interactively only: $HISTFILE[L97]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094454f0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='Vars set interactively only: $HISTFILE', script="case $SH in dash|mksh|zsh) exit ;; esac\n\n$SH --norc ...t=None), Assertion(type='stdout', value='', shells=['dash', 'mksh', 'zsh'], variant='N-I')], line_number=97, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Vars set interactively only: $HISTFILE (line 97)
E           
E           stdout mismatch:
E             expected: 'histfile=\nhistfile=yes'
E             actual:   ''
E           
E           Expected stdout: 'histfile=\nhistfile=yes'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: --norc: No such file or directory\nbash: --norc: No such file or directory\n'
E           Expected status: None
E           Actual status:   127
E           
E           Script:
E           ---
E           case $SH in dash|mksh|zsh) exit ;; esac
E           
E           $SH --norc --rcfile /dev/null -c 'echo histfile=${HISTFILE:+yes}'
E           $SH --norc --rcfile /dev/null -i -c 'echo histfile=${HISTFILE:+yes}'
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::Some vars are set, even without startup file, or env: PATH, PWD[L112]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094455b0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='Some vars are set, even without startup file, or env: PATH, PWD', script='flags=\'\'\ncase $SH in\n  da...'zsh'], variant='OK'), Assertion(type='stdout', value='', shells=['dash'], variant='N-I')], line_number=112, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Some vars are set, even without startup file, or env: PATH, PWD (line 112)
E           
E           stdout mismatch:
E             expected: 'path pwd ps4 0\nshellopts 0\nhome ps1 1\nifs 0'
E             actual:   'path pwd ps4 1\nshellopts 1\nhome ps1 1\nifs 1'
E           
E           Expected stdout: 'path pwd ps4 0\nshellopts 0\nhome ps1 1\nifs 0'
E           Actual stdout:   'path pwd ps4 1\nshellopts 1\nhome ps1 1\nifs 1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: /usr/bin/env: command not found\nbash: /usr/bin/env: command not found\nbash: /usr/bin/env: command not found\nbash: /usr/bin/env: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           flags=''
E           case $SH in
E             dash) exit ;;
E             bash*)
E               flags='--noprofile --norc --rcfile /devnull'
E               ;;
E             osh)
E               flags='--rcfile /devnull'
E               ;;
E           esac
E           
E           sh_path=$(which $SH)
E           
E           case $sh_path in
E             */bin/osh)
E               # Hack for running with Python2
E               export PYTHONPATH="$REPO_ROOT:$REPO_ROOT/vendor"
E               sh_prefix="$(which python2) $REPO_ROOT/bin/oils_for_unix.py osh"
E               ;;
E             *)
E               sh_prefix=$sh_path
E               ;;
E           esac
E           
E           #echo PATH=$PATH
E           
E           
E           # mksh has typeset, not declare
E           # bash exports PWD, but not PATH PS4
E           
E           /usr/bin/env -i PYTHONPATH=$PYTHONPATH $sh_prefix $flags -c 'typeset -p PATH PWD PS4' >&2
E           echo path pwd ps4 $?
E           
E           /usr/bin/env -i PYTHONPATH=$PYTHONPATH $sh_prefix $flags -c 'typeset -p SHELLOPTS' >&2
E           echo shellopts $?
E           
E           # bash doesn't set HOME, mksh and zsh do
E           /usr/bin/env -i PYTHONPATH=$PYTHONPATH $sh_prefix $flags -c 'typeset -p HOME PS1' >&2
E           echo home ps1 $?
E           
E           # IFS is set, but not exported
E           /usr/bin/env -i PYTHONPATH=$PYTHONPATH $sh_prefix $flags -c 'typeset -p IFS' >&2
E           echo ifs $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::UID EUID PPID can't be changed[L183]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445670>
test_file = 'vars-special.test.sh'
test_case = TestCase(name="UID EUID PPID can't be changed", script="# bash makes these 3 read-only\n{\n  UID=xx $SH -c 'echo uid=$...type='stdout', value='uid=xx\neuid=xx\nstatus=0', shells=['dash', 'mksh'], variant='BUG')], line_number=183, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: UID EUID PPID can't be changed (line 183)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'uid=xx\neuid=xx\nppid=xx\nstatus=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'uid=xx\neuid=xx\nppid=xx\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash makes these 3 read-only
E           {
E             UID=xx $SH -c 'echo uid=$UID'
E           
E             EUID=xx $SH -c 'echo euid=$EUID'
E           
E             PPID=xx $SH -c 'echo ppid=$PPID'
E           
E           } > out.txt
E           
E           # bash shows that vars are readonly
E           # zsh shows other errors
E           # cat out.txt
E           #echo
E           
E           grep '=xx' out.txt
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_______ TestBashSpecTests.test_spec_case[vars-special.test.sh::$?[L260]] _______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094458b0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$?', script="echo $?  # starts out as 0\nsh -c 'exit 33'\necho $?", assertions=[Assertion(type='stdout'... shells=None, variant=None), Assertion(type='status', value=0, shells=None, variant=None)], line_number=260, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $? (line 260)
E           
E           stdout mismatch:
E             expected: '0\n33'
E             actual:   '0'
E           status mismatch: expected 0, got 33
E           
E           Expected stdout: '0\n33'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   33
E           
E           Script:
E           ---
E           echo $?  # starts out as 0
E           sh -c 'exit 33'
E           echo $?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$BASHPID DOES change with subshell and command sub[L308]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445bb0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$BASHPID DOES change with subshell and command sub', script='set -o errexit\ndie() {\n  echo 1>&2 "$@";...nt='N-I'), Assertion(type='stdout-json', value='', shells=['dash', 'zsh'], variant='N-I')], line_number=308, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $BASHPID DOES change with subshell and command sub (line 308)
E           
E           stdout mismatch:
E             expected: 'subshell OK\ncommand sub OK'
E             actual:   ''
E           status mismatch: expected 3, got 1
E           
E           Expected stdout: 'subshell OK\ncommand sub OK'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 3
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           die() {
E             echo 1>&2 "$@"; exit 1
E           }
E           parent=$BASHPID
E           test -n "$parent" || die "empty BASHPID in parent"
E           ( child=$BASHPID
E             test -n "$child" || die "empty BASHPID in subshell"
E             test "$parent" != "$child" || die "should not be equal: $parent = $child"
E             echo 'subshell OK'
E           )
E           echo $( child=$BASHPID
E                   test -n "$child" || die "empty BASHPID in command sub"
E                   test "$parent" != "$child" ||
E                     die "should not be equal: $parent = $child"
E                   echo 'command sub OK'
E                 )
E           exit 3  # make sure we got here
E           
E           # mksh also implements BASHPID!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::Background PID $! looks like a PID[L338]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445c70>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='Background PID $! looks like a PID', script="sleep 0.01 &\npid=$!\nwait\necho $pid | egrep '[0-9]+' >/d...us=$?", assertions=[Assertion(type='stdout', value='status=0', shells=None, variant=None)], line_number=338, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Background PID $! looks like a PID (line 338)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           sleep 0.01 &
E           pid=$!
E           wait
E           echo $pid | egrep '[0-9]+' >/dev/null
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_____ TestBashSpecTests.test_spec_case[vars-special.test.sh::$PPID[L346]] ______

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445d30>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$PPID', script="echo $PPID | egrep '[0-9]+'\n\n# NOTE: There is also $BASHPID", assertions=[Assertion(type='status', value=0, shells=None, variant=None)], line_number=346, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $PPID (line 346)
E           
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           echo $PPID | egrep '[0-9]+'
E           
E           # NOTE: There is also $BASHPID
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$UID and $EUID[L371]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109445f70>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$UID and $EUID', script="# These are both bash-specific.\nset -o errexit\necho $UID | egrep -o '[0-9]+'...variant='N-I'), Assertion(type='status', value=1, shells=['dash', 'mksh'], variant='N-I')], line_number=371, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $UID and $EUID (line 371)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   ''
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # These are both bash-specific.
E           set -o errexit
E           echo $UID | egrep -o '[0-9]+' >/dev/null
E           echo $EUID | egrep -o '[0-9]+' >/dev/null
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$OSTYPE is non-empty[L381]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109446030>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$OSTYPE is non-empty', script='test -n "$OSTYPE"\necho status=$?', assertions=[Assertion(type='stdout',...None), Assertion(type='stdout', value='status=1', shells=['dash', 'mksh'], variant='N-I')], line_number=381, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $OSTYPE is non-empty (line 381)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           test -n "$OSTYPE"
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
___ TestBashSpecTests.test_spec_case[vars-special.test.sh::$HOSTNAME[L391]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094460f0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$HOSTNAME', script='test "$HOSTNAME" = "$(hostname)"\necho status=$?', assertions=[Assertion(type='stdo...Assertion(type='stdout', value='status=1', shells=['dash', 'mksh', 'zsh'], variant='N-I')], line_number=391, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $HOSTNAME (line 391)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           test "$HOSTNAME" = "$(hostname)"
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$LINENO in "bare" redirect arg (bug regression)[L431]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109446270>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$LINENO in "bare" redirect arg (bug regression)', script='filename=$TMP/bare3\nrm -f $filename\n> $TMP/...ls=None, variant=None), Assertion(type='stdout', value='', shells=['zsh'], variant='BUG')], line_number=431, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $LINENO in "bare" redirect arg (bug regression) (line 431)
E           
E           stdout mismatch:
E             expected: 'written\n5'
E             actual:   '5'
E           
E           Expected stdout: 'written\n5'
E           Actual stdout:   '5\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           filename=$TMP/bare3
E           rm -f $filename
E           > $TMP/bare$LINENO
E           test -f $filename && echo written
E           echo $LINENO
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$LINENO in [[[L455]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094463f0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$LINENO in [[', script='echo one\n[[ $LINENO -eq 2 ]] && echo OK', assertions=[Assertion(type='stdout',...'], variant='N-I'), Assertion(type='stdout', value='one', shells=['mksh'], variant='N-I')], line_number=455, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $LINENO in [[ (line 455)
E           
E           stdout mismatch:
E             expected: 'one\nOK'
E             actual:   'one'
E           
E           Expected stdout: 'one\nOK'
E           Actual stdout:   'one\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           echo one
E           [[ $LINENO -eq 2 ]] && echo OK
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$LINENO in (([L467]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094464b0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$LINENO in ((', script='echo one\n(( x = LINENO ))\necho $x', assertions=[Assertion(type='stdout', valu...e, variant=None), Assertion(type='stdout', value='one\n', shells=['dash'], variant='N-I')], line_number=467, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $LINENO in (( (line 467)
E           
E           stdout mismatch:
E             expected: 'one\n2'
E             actual:   'one\n1'
E           
E           Expected stdout: 'one\n2'
E           Actual stdout:   'one\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo one
E           (( x = LINENO ))
E           echo $x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$LINENO in other for loops[L498]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109446630>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$LINENO in other for loops', script='set -- a b c\nfor x; do\n  echo $LINENO $x\ndone', assertions=[Assertion(type='stdout', value='3 a\n3 b\n3 c', shells=None, variant=None)], line_number=498, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $LINENO in other for loops (line 498)
E           
E           stdout mismatch:
E             expected: '3 a\n3 b\n3 c'
E             actual:   ''
E           
E           Expected stdout: '3 a\n3 b\n3 c'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- a b c
E           for x; do
E             echo $LINENO $x
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$LINENO in for (( loop[L509]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094466f0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$LINENO in for (( loop', script='echo one\nfor (( i = 0; i < $LINENO; i++ )); do\n  echo $i\ndone', ass...mksh'], variant='BUG'), Assertion(type='status', value=1, shells=['mksh'], variant='BUG')], line_number=509, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $LINENO in for (( loop (line 509)
E           
E           stdout mismatch:
E             expected: 'one\n0\n1'
E             actual:   'one\n0\n1\n2'
E           
E           Expected stdout: 'one\n0\n1'
E           Actual stdout:   'one\n0\n1\n2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo one
E           for (( i = 0; i < $LINENO; i++ )); do
E             echo $i
E           done
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[vars-special.test.sh::$_ and ${_}[L560]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094469f0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$_ and ${_}', script='case $SH in dash|mksh) exit ;; esac\n\n_var=value\n\n: 42\necho $_ $_var ${_}var\...nt=None), Assertion(type='stdout-json', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=560, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $_ and ${_} (line 560)
E           
E           stdout mismatch:
E             expected: '42 value 42var\nfoobar'
E             actual:   '42 42var 42var\nfoobar'
E           
E           Expected stdout: '42 value 42var\nfoobar'
E           Actual stdout:   '42 42var 42var\nfoobar\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           _var=value
E           
E           : 42
E           echo $_ $_var ${_}var
E           
E           : 'foo'"bar"
E           echo $_
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$_ with pipeline and subshell[L591]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109446b70>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$_ with pipeline and subshell', script='case $SH in dash|mksh) exit ;; esac\n\nshopt -s lastpipe\n\nseq...nt='OK'), Assertion(type='stdout-json', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=591, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $_ with pipeline and subshell (line 591)
E           
E           stdout mismatch:
E             expected: 'last=\npipeline=last=\nsubshell=pipeline=last=\ndone=pipeline=last='
E             actual:   'last=3\npipeline=last=3\nsubshell=\ndone=pipeline=last=3'
E           
E           Expected stdout: 'last=\npipeline=last=\nsubshell=pipeline=last=\ndone=pipeline=last='
E           Actual stdout:   'last=3\npipeline=last=3\nsubshell=\ndone=pipeline=last=3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           shopt -s lastpipe
E           
E           seq 3 | echo last=$_
E           
E           echo pipeline=$_
E           
E           ( echo subshell=$_ )
E           echo done=$_
E           
E           
E           # very weird semantics for zsh!
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[vars-special.test.sh::$_ with assignments, arrays, etc.[L663]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109446db0>
test_file = 'vars-special.test.sh'
test_case = TestCase(name='$_ with assignments, arrays, etc.', script='case $SH in dash|mksh) exit ;; esac\n\n: foo\necho "colon [...nt='OK'), Assertion(type='stdout-json', value='', shells=['dash', 'mksh'], variant='N-I')], line_number=663, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $_ with assignments, arrays, etc. (line 663)
E           
E           stdout mismatch:
E             expected: 'colon [foo]\nbare assign []\ndeclare [s=bar]\narray []\ndeclare array [a]\ndeclare flag [d]'
E             actual:   'colon [foo]\nbare assign [colon [foo]]\ndeclare [s=bar]\narray [declare [s=bar]]\ndeclare array [a=(1 2)]\ndeclare flag [d=(1 2)]'
E           
E           Expected stdout: 'colon [foo]\nbare assign []\ndeclare [s=bar]\narray []\ndeclare array [a]\ndeclare flag [d]'
E           Actual stdout:   'colon [foo]\nbare assign [colon [foo]]\ndeclare [s=bar]\narray [declare [s=bar]]\ndeclare array [a=(1 2)]\ndeclare flag [d=(1 2)]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh) exit ;; esac
E           
E           : foo
E           echo "colon [$_]"
E           
E           s=bar
E           echo "bare assign [$_]"
E           
E           # zsh uses declare; bash uses s=bar
E           declare s=bar
E           echo "declare [$_]"
E           
E           # zsh remains s:declare, bash resets it
E           a=(1 2)
E           echo "array [$_]"
E           
E           # zsh sets it to declare, bash uses the LHS a
E           declare a=(1 2)
E           echo "declare array [$_]"
E           
E           declare -g d=(1 2)
E           echo "declare flag [$_]"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[whitespace.test.sh::Parsing shell words \\r \\v[L3]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109447170>
test_file = 'whitespace.test.sh'
test_case = TestCase(name='Parsing shell words \\r \\v', script='# frontend/lexer_def.py has rules for this\n\ntab=$(python2 -c \'...tdout', value="['-', '-']\n['-\\r-']\n['-\\x0b-']\n['-\\x0c-']", shells=None, variant=None)], line_number=3, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Parsing shell words \r \v (line 3)
E           
E           stdout mismatch:
E             expected: "['-', '-']\n['-\\r-']\n['-\\x0b-']\n['-\\x0c-']"
E             actual:   ''
E           
E           Expected stdout: "['-', '-']\n['-\\r-']\n['-\\x0b-']\n['-\\x0c-']"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # frontend/lexer_def.py has rules for this
E           
E           tab=$(python2 -c 'print "argv.py -\t-"')
E           cr=$(python2 -c 'print "argv.py -\r-"')
E           vert=$(python2 -c 'print "argv.py -\v-"')
E           ff=$(python2 -c 'print "argv.py -\f-"')
E           
E           $SH -c "$tab"
E           $SH -c "$cr"
E           $SH -c "$vert"
E           $SH -c "$ff"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[whitespace.test.sh::\\r in arith expression is allowed by some shells, but not most![L25]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109447230>
test_file = 'whitespace.test.sh'
test_case = TestCase(name='\\r in arith expression is allowed by some shells, but not most!', script='arith=$(python2 -c \'print "...ssertion(type='stdout', value="['3']\n['3']", shells=['mksh', 'ash', 'osh'], variant='OK')], line_number=25, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \r in arith expression is allowed by some shells, but not most! (line 25)
E           
E           stdout mismatch:
E             expected: "['3']\nfailed"
E             actual:   ''
E           
E           Expected stdout: "['3']\nfailed"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           arith=$(python2 -c 'print "argv.py $(( 1 +\n2))"')
E           arith_cr=$(python2 -c 'print "argv.py $(( 1 +\r\n2))"')
E           
E           $SH -c "$arith"
E           if test $? -ne 0; then
E             echo 'failed'
E           fi
E           
E           $SH -c "$arith_cr"
E           if test $? -ne 0; then
E             echo 'failed'
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[whitespace.test.sh::whitespace in string to integer conversion[L51]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094472f0>
test_file = 'whitespace.test.sh'
test_case = TestCase(name='whitespace in string to integer conversion', script='tab=$(python2 -c \'print "\\t42\\t"\')\ncr=$(pytho...ne), Assertion(type='stdout', value='43\n43', shells=['mksh', 'ash', 'osh'], variant='OK')], line_number=51, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: whitespace in string to integer conversion (line 51)
E           
E           stdout mismatch:
E             expected: '43\nfailed'
E             actual:   '1\n1'
E           
E           Expected stdout: '43\nfailed'
E           Actual stdout:   '1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           tab=$(python2 -c 'print "\t42\t"')
E           cr=$(python2 -c 'print "\r42\r"')
E           
E           $SH -c 'echo $(( $1 + 1 ))' dummy0 "$tab"
E           if test $? -ne 0; then
E             echo 'failed'
E           fi
E           
E           $SH -c 'echo $(( $1 + 1 ))' dummy0 "$cr"
E           if test $? -ne 0; then
E             echo 'failed'
E           fi
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[whitespace.test.sh::\\r at end of line is not special[L77]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094473b0>
test_file = 'whitespace.test.sh'
test_case = TestCase(name='\\r at end of line is not special', script='# hm I wonder if Windows ports have rules for this?\n\ncr=$... "$cr"', assertions=[Assertion(type='stdout', value="['-\\r']", shells=None, variant=None)], line_number=77, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: \r at end of line is not special (line 77)
E           
E           stdout mismatch:
E             expected: "['-\\r']"
E             actual:   ''
E           
E           Expected stdout: "['-\\r']"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # hm I wonder if Windows ports have rules for this?
E           
E           cr=$(python2 -c 'print "argv.py -\r"')
E           
E           $SH -c "$cr"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[whitespace.test.sh::Default IFS does not include \\r \\v \\f[L90]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109447470>
test_file = 'whitespace.test.sh'
test_case = TestCase(name='Default IFS does not include \\r \\v \\f', script='# dash and zsh don\'t have echo -e\ntab=$(python2 -c...ut', value="['-\\t-']\n['-\\r-']\n['-\\x0b-']\n['-\\x0c-']", shells=['zsh'], variant='OK')], line_number=90, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Default IFS does not include \r \v \f (line 90)
E           
E           stdout mismatch:
E             expected: "['-', '-']\n['-\\r-']\n['-\\x0b-']\n['-\\x0c-']"
E             actual:   '[]\n[]\n[]\n[]'
E           
E           Expected stdout: "['-', '-']\n['-\\r-']\n['-\\x0b-']\n['-\\x0c-']"
E           Actual stdout:   '[]\n[]\n[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # dash and zsh don't have echo -e
E           tab=$(python2 -c 'print "-\t-"')
E           cr=$(python2 -c 'print "-\r-"')
E           vert=$(python2 -c 'print "-\v-"')
E           ff=$(python2 -c 'print "-\f-"')
E           
E           $SH -c 'argv.py $1' dummy0 "$tab"
E           $SH -c 'argv.py $1' dummy0 "$cr"
E           $SH -c 'argv.py $1' dummy0 "$vert"
E           $SH -c 'argv.py $1' dummy0 "$ff"
E           
E           
E           # No word splitting in zsh
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[word-eval.test.sh::Word joining[L35]] ____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109447770>
test_file = 'word-eval.test.sh'
test_case = TestCase(name='Word joining', script='set -- x y z\ns1=\'1 2\'\narray=(a1 a2)\nargv.py $s1"${array[@]}"_"$@"', asserti...'dash'], variant='N-I'), Assertion(type='status', value=2, shells=['dash'], variant='N-I')], line_number=35, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Word joining (line 35)
E           
E           stdout mismatch:
E             expected: "['1', '2a1', 'a2_x', 'y', 'z']"
E             actual:   "['1 2a1 a2_x', 'y', 'z']"
E           
E           Expected stdout: "['1', '2a1', 'a2_x', 'y', 'z']"
E           Actual stdout:   "['1 2a1 a2_x', 'y', 'z']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- x y z
E           s1='1 2'
E           array=(a1 a2)
E           argv.py $s1"${array[@]}"_"$@"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-eval.test.sh::Default values -- more cases[L49]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094478f0>
test_file = 'word-eval.test.sh'
test_case = TestCase(name='Default values -- more cases', script='argv.py ${undef:-hi} ${undef:-\'a b\'} "${undef:-c d}" "${un:-"e...tdout', value='[\'hi\', \'a b\', \'c d\', \'e f\', "\'g h\'"]', shells=None, variant=None)], line_number=49, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Default values -- more cases (line 49)
E           
E           stdout mismatch:
E             expected: '[\'hi\', \'a b\', \'c d\', \'e f\', "\'g h\'"]'
E             actual:   "['hi', 'a', 'b', 'c d', 'e f', 'g h']"
E           
E           Expected stdout: '[\'hi\', \'a b\', \'c d\', \'e f\', "\'g h\'"]'
E           Actual stdout:   "['hi', 'a', 'b', 'c d', 'e f', 'g h']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py ${undef:-hi} ${undef:-'a b'} "${undef:-c d}" "${un:-"e f"}" "${un:-'g h'}"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-eval.test.sh::Globbing after splitting[L53]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x1094479b0>
test_file = 'word-eval.test.sh'
test_case = TestCase(name='Globbing after splitting', script="mkdir -p _tmp\ntouch _tmp/foo.gg _tmp/bar.gg _tmp/foo.hh\npat='_tmp/...stdout', value="['_tmp/foo.hh', '_tmp/bar.gg', '_tmp/foo.gg']", shells=None, variant=None)], line_number=53, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Globbing after splitting (line 53)
E           
E           stdout mismatch:
E             expected: "['_tmp/foo.hh', '_tmp/bar.gg', '_tmp/foo.gg']"
E             actual:   "['_tmp/*.hh', '_tmp/*.gg']"
E           
E           Expected stdout: "['_tmp/foo.hh', '_tmp/bar.gg', '_tmp/foo.gg']"
E           Actual stdout:   "['_tmp/*.hh', '_tmp/*.gg']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           touch _tmp/foo.gg _tmp/bar.gg _tmp/foo.hh
E           pat='_tmp/*.hh _tmp/*.gg'
E           argv.py $pat
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-eval.test.sh::Globbing escaping[L60]] __

self = <spec_tests.test_spec.TestBashSpecTests object at 0x109447a70>
test_file = 'word-eval.test.sh'
test_case = TestCase(name='Globbing escaping', script="mkdir -p _tmp\ntouch '_tmp/[bc]ar.mm' # file that looks like a glob pattern... value="['_tmp/[bc]ar.mm', '-', '_tmp/bar.mm', '_tmp/car.mm']", shells=None, variant=None)], line_number=60, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Globbing escaping (line 60)
E           
E           stdout mismatch:
E             expected: "['_tmp/[bc]ar.mm', '-', '_tmp/bar.mm', '_tmp/car.mm']"
E             actual:   "['_tmp/bar.mm', '_tmp/car.mm', '-', '_tmp/bar.mm', '_tmp/car.mm']"
E           
E           Expected stdout: "['_tmp/[bc]ar.mm', '-', '_tmp/bar.mm', '_tmp/car.mm']"
E           Actual stdout:   "['_tmp/bar.mm', '_tmp/car.mm', '-', '_tmp/bar.mm', '_tmp/car.mm']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           touch '_tmp/[bc]ar.mm' # file that looks like a glob pattern
E           touch _tmp/bar.mm _tmp/car.mm
E           argv.py '_tmp/[bc]'*.mm - _tmp/?ar.mm
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::$* with empty IFS[L73]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946c230>
test_file = 'word-split.test.sh'
test_case = TestCase(name='$* with empty IFS', script='set -- "1 2" "3  4"\n\nIFS=\nargv.py $*\nargv.py "$*"', assertions=[Assertion(type='stdout', value="['1 2', '3  4']\n['1 23  4']", shells=None, variant=None)], line_number=73, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: $* with empty IFS (line 73)
E           
E           stdout mismatch:
E             expected: "['1 2', '3  4']\n['1 23  4']"
E             actual:   "['1 23  4']\n['1 23  4']"
E           
E           Expected stdout: "['1 2', '3  4']\n['1 23  4']"
E           Actual stdout:   "['1 23  4']\n['1 23  4']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- "1 2" "3  4"
E           
E           IFS=
E           argv.py $*
E           argv.py "$*"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::Leading ' ' vs leading ' _ '[L117]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946c530>
test_file = 'word-split.test.sh'
test_case = TestCase(name="Leading ' ' vs leading ' _ '", script="# This behavior is weird, but all shells agree.\nIFS='_ '\ns1='_...=[Assertion(type='stdout', value="['', 'a', 'b']\n['a', 'b']", shells=None, variant=None)], line_number=117, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Leading ' ' vs leading ' _ ' (line 117)
E           
E           stdout mismatch:
E             expected: "['', 'a', 'b']\n['a', 'b']"
E             actual:   "['', 'a', 'b', '']\n['a', 'b', '']"
E           
E           Expected stdout: "['', 'a', 'b']\n['a', 'b']"
E           Actual stdout:   "['', 'a', 'b', '']\n['a', 'b', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This behavior is weird, but all shells agree.
E           IFS='_ '
E           s1='_ a  b _ '
E           s2='  a  b _ '
E           argv.py $s1
E           argv.py $s2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS with whitespace and non-whitepace.[L135]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946c6b0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='IFS with whitespace and non-whitepace.', script="# NOTE: Three delimiters means two empty words in the ...tion(type='stdout', value="['a', 'b', '', '', 'c', 'd', 'e']", shells=None, variant=None)], line_number=135, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS with whitespace and non-whitepace. (line 135)
E           
E           stdout mismatch:
E             expected: "['a', 'b', '', '', 'c', 'd', 'e']"
E             actual:   "['a', 'b', '', '', '', 'c', '', 'd', 'e']"
E           
E           Expected stdout: "['a', 'b', '', '', 'c', 'd', 'e']"
E           Actual stdout:   "['a', 'b', '', '', '', 'c', '', 'd', 'e']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: Three delimiters means two empty words in the middle.  No elision.
E           IFS='_ '
E           s1='a_b _ _ _ c  _d e'
E           argv.py $s1
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::empty literals are not elided[L157]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946c9b0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='empty literals are not elided', script='space=" "\nargv.py 1 $space"" 2', assertions=[Assertion(type='stdout', value="['1', '', '2']", shells=None, variant=None)], line_number=157, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: empty literals are not elided (line 157)
E           
E           stdout mismatch:
E             expected: "['1', '', '2']"
E             actual:   "['1', '2']"
E           
E           Expected stdout: "['1', '', '2']"
E           Actual stdout:   "['1', '2']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           space=" "
E           argv.py 1 $space"" 2
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::default value can yield multiple words[L168]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946cb30>
test_file = 'word-split.test.sh'
test_case = TestCase(name='default value can yield multiple words', script='argv.py 1 ${undefined:-"2 3" "4 5"} 6', assertions=[Assertion(type='stdout', value="['1', '2 3', '4 5', '6']", shells=None, variant=None)], line_number=168, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: default value can yield multiple words (line 168)
E           
E           stdout mismatch:
E             expected: "['1', '2 3', '4 5', '6']"
E             actual:   "['1', '2', '3', '4', '5', '6']"
E           
E           Expected stdout: "['1', '2 3', '4 5', '6']"
E           Actual stdout:   "['1', '2', '3', '4', '5', '6']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py 1 ${undefined:-"2 3" "4 5"} 6
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::default value can yield multiple words with part joining[L172]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946cbf0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='default value can yield multiple words with part joining', script='argv.py 1${undefined:-"2 3" "4 5"}6', assertions=[Assertion(type='stdout', value="['12 3', '4 56']", shells=None, variant=None)], line_number=172, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: default value can yield multiple words with part joining (line 172)
E           
E           stdout mismatch:
E             expected: "['12 3', '4 56']"
E             actual:   "['12', '3', '4', '56']"
E           
E           Expected stdout: "['12 3', '4 56']"
E           Actual stdout:   "['12', '3', '4', '56']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py 1${undefined:-"2 3" "4 5"}6
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::default value with unquoted IFS char[L176]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946ccb0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='default value with unquoted IFS char', script='IFS=_\nargv.py 1${undefined:-"2_3"x_x"4_5"}6', assertions=[Assertion(type='stdout', value="['12_3x', 'x4_56']", shells=None, variant=None)], line_number=176, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: default value with unquoted IFS char (line 176)
E           
E           stdout mismatch:
E             expected: "['12_3x', 'x4_56']"
E             actual:   "['12', '3x', 'x4', '56']"
E           
E           Expected stdout: "['12_3x', 'x4_56']"
E           Actual stdout:   "['12', '3x', 'x4', '56']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           IFS=_
E           argv.py 1${undefined:-"2_3"x_x"4_5"}6
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS empty doesn't do splitting[L181]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946cd70>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS empty doesn't do splitting", script='IFS=\'\'\nx=$(python2 -c \'print(" a b\\tc\\n")\')\nargv.py $x', assertions=[Assertion(type='stdout', value="[' a b\\tc']", shells=None, variant=None)], line_number=181, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS empty doesn't do splitting (line 181)
E           
E           stdout mismatch:
E             expected: "[' a b\\tc']"
E             actual:   '[]'
E           
E           Expected stdout: "[' a b\\tc']"
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           IFS=''
E           x=$(python2 -c 'print(" a b\tc\n")')
E           argv.py $x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS unset behaves like $' \\t\\n'[L190]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946ce30>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS unset behaves like $' \\t\\n'", script='unset IFS\nx=$(python2 -c \'print(" a b\\tc\\n")\')\nargv.p... assertions=[Assertion(type='stdout', value="['a', 'b', 'c']", shells=None, variant=None)], line_number=190, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS unset behaves like $' \t\n' (line 190)
E           
E           stdout mismatch:
E             expected: "['a', 'b', 'c']"
E             actual:   '[]'
E           
E           Expected stdout: "['a', 'b', 'c']"
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           unset IFS
E           x=$(python2 -c 'print(" a b\tc\n")')
E           argv.py $x
E           ---

tests/spec_tests/test_spec.py:218: Failed
____ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS='\\ '[L208]] _____

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946cfb0>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS='\\ '", script="# NOTE: OSH fails this because of double backslash escaping issue!\n# When IFS is \...ns=[Assertion(type='stdout', value="['a', 'b', '', 'c', 'd']", shells=None, variant=None)], line_number=208, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='\ ' (line 208)
E           
E           stdout mismatch:
E             expected: "['a', 'b', '', 'c', 'd']"
E             actual:   "['a', 'b', '', '', 'c', 'd']"
E           
E           Expected stdout: "['a', 'b', '', 'c', 'd']"
E           Actual stdout:   "['a', 'b', '', '', 'c', 'd']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: OSH fails this because of double backslash escaping issue!
E           # When IFS is \, then you're no longer using backslash escaping.
E           IFS='\ '
E           s='a\b \\ c d\'
E           argv.py $s
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS and joining arrays[L271]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946d430>
test_file = 'word-split.test.sh'
test_case = TestCase(name='IFS and joining arrays', script='IFS=:\nset -- x \'y z\'\nargv.py "$@"\nargv.py $@\nargv.py "$*"\nargv...., value="['x', 'y z']\n['x', 'y z']\n['x:y z']\n['x', 'y z']", shells=None, variant=None)], line_number=271, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS and joining arrays (line 271)
E           
E           stdout mismatch:
E             expected: "['x', 'y z']\n['x', 'y z']\n['x:y z']\n['x', 'y z']"
E             actual:   "['x', 'y z']\n['x y z']\n['x:y z']\n['x', 'y z']"
E           
E           Expected stdout: "['x', 'y z']\n['x', 'y z']\n['x:y z']\n['x', 'y z']"
E           Actual stdout:   "['x', 'y z']\n['x y z']\n['x:y z']\n['x', 'y z']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           IFS=:
E           set -- x 'y z'
E           argv.py "$@"
E           argv.py $@
E           argv.py "$*"
E           argv.py $*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS='' with $@ and $* (bug #627)[L339]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946d670>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS='' with $@ and $* (bug #627)", script="set -- a 'b c'\nIFS=''\nargv.py at $@\nargv.py star $*\n\n# ...pe='stdout', value="['at', 'a', 'b c']\n['star', 'a', 'b c']", shells=None, variant=None)], line_number=339, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='' with $@ and $* (bug #627) (line 339)
E           
E           stdout mismatch:
E             expected: "['at', 'a', 'b c']\n['star', 'a', 'b c']"
E             actual:   "['at', 'a b c']\n['star', 'ab c']"
E           
E           Expected stdout: "['at', 'a', 'b c']\n['star', 'a', 'b c']"
E           Actual stdout:   "['at', 'a b c']\n['star', 'ab c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- a 'b c'
E           IFS=''
E           argv.py at $@
E           argv.py star $*
E           
E           # zsh agrees
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS='' with $@ and $* and printf (bug #627)[L351]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946d730>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS='' with $@ and $* and printf (bug #627)", script="set -- a 'b c'\nIFS=''\nprintf '[%s]\\n' $@\nprin...ions=[Assertion(type='stdout', value='[a]\n[b c]\n[a]\n[b c]', shells=None, variant=None)], line_number=351, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='' with $@ and $* and printf (bug #627) (line 351)
E           
E           stdout mismatch:
E             expected: '[a]\n[b c]\n[a]\n[b c]'
E             actual:   '[a b c]\n[ab c]'
E           
E           Expected stdout: '[a]\n[b c]\n[a]\n[b c]'
E           Actual stdout:   '[a b c]\n[ab c]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- a 'b c'
E           IFS=''
E           printf '[%s]\n' $@
E           printf '[%s]\n' $*
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS='' with ${a[@]} and ${a[*]} (bug #627)[L363]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946d7f0>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS='' with ${a[@]} and ${a[*]} (bug #627)", script="case $SH in dash | ash) exit 0 ;; esac\n\nmyarray=...ant=None), Assertion(type='stdout-json', value='', shells=['dash', 'ash'], variant='N-I')], line_number=363, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='' with ${a[@]} and ${a[*]} (bug #627) (line 363)
E           
E           stdout mismatch:
E             expected: "['at', 'a', 'b c']\n['star', 'a', 'b c']"
E             actual:   "['at', 'a b c']\n['star', 'ab c']"
E           
E           Expected stdout: "['at', 'a', 'b c']\n['star', 'a', 'b c']"
E           Actual stdout:   "['at', 'a b c']\n['star', 'ab c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash | ash) exit 0 ;; esac
E           
E           myarray=(a 'b c')
E           IFS=''
E           argv.py at ${myarray[@]}
E           argv.py star ${myarray[*]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS='' with ${!prefix@} and ${!prefix*} (bug #627)[L377]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946d8b0>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS='' with ${!prefix@} and ${!prefix*} (bug #627)", script="case $SH in dash | mksh | ash | yash) exit...tion(type='stdout-json', value='', shells=['dash', 'mksh', 'ash', 'yash'], variant='N-I')], line_number=377, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='' with ${!prefix@} and ${!prefix*} (bug #627) (line 377)
E           
E           stdout mismatch:
E             expected: "['at', 'gLwbmGzS_var1', 'gLwbmGzS_var2']\n['star', 'gLwbmGzS_var1gLwbmGzS_var2']"
E             actual:   "['at', 'gLwbmGzS_var1 gLwbmGzS_var2']\n['star', 'gLwbmGzS_var1 gLwbmGzS_var2']"
E           
E           Expected stdout: "['at', 'gLwbmGzS_var1', 'gLwbmGzS_var2']\n['star', 'gLwbmGzS_var1gLwbmGzS_var2']"
E           Actual stdout:   "['at', 'gLwbmGzS_var1 gLwbmGzS_var2']\n['star', 'gLwbmGzS_var1 gLwbmGzS_var2']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash | mksh | ash | yash) exit 0 ;; esac
E           
E           gLwbmGzS_var1=1
E           gLwbmGzS_var2=2
E           IFS=''
E           argv.py at ${!gLwbmGzS_@}
E           argv.py star ${!gLwbmGzS_*}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS='' with ${!a[@]} and ${!a[*]} (bug #627)[L396]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946d970>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS='' with ${!a[@]} and ${!a[*]} (bug #627)", script="case $SH in dash | mksh | ash | yash) exit 0 ;; ...tion(type='stdout-json', value='', shells=['dash', 'mksh', 'ash', 'yash'], variant='N-I')], line_number=396, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS='' with ${!a[@]} and ${!a[*]} (bug #627) (line 396)
E           
E           stdout mismatch:
E             expected: "['at', '0', '1', '2']\n['star', '0 1 2']"
E             actual:   "['at', '0 1 2']\n['star', '0 1 2']"
E           
E           Expected stdout: "['at', '0', '1', '2']\n['star', '0 1 2']"
E           Actual stdout:   "['at', '0 1 2']\n['star', '0 1 2']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash | mksh | ash | yash) exit 0 ;; esac
E           
E           IFS=''
E           a=(v1 v2 v3)
E           argv.py at ${!a[@]}
E           argv.py star ${!a[*]}
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::Bug #628 split on : with : in literal word[L414]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946da30>
test_file = 'word-split.test.sh'
test_case = TestCase(name='Bug #628 split on : with : in literal word', script="# 2025-03: What's the cause of this bug?\n#\n# OSH...value="['a', ':b']\n['a', ':']\n---\n['a', 'zb']\n['a', 'z']", shells=None, variant=None)], line_number=414, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Bug #628 split on : with : in literal word (line 414)
E           
E           stdout mismatch:
E             expected: "['a', ':b']\n['a', ':']\n---\n['a', 'zb']\n['a', 'z']"
E             actual:   "['a', '', 'b']\n['a', '']\n---\n['a', '', 'b']\n['a', '']"
E           
E           Expected stdout: "['a', ':b']\n['a', ':']\n---\n['a', 'zb']\n['a', 'z']"
E           Actual stdout:   "['a', '', 'b']\n['a', '']\n---\n['a', '', 'b']\n['a', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # 2025-03: What's the cause of this bug?
E           #
E           # OSH is very wrong here
E           #   ['a', '\\', 'b']
E           # Is this a fundamental problem with the IFS state machine?
E           # It definitely relates to the use of backslashes.
E           # So we have at least 4 backslash bugs
E           
E           IFS=':'
E           word='a:'
E           argv.py ${word}:b
E           argv.py ${word}:
E           
E           echo ---
E           
E           # Same thing happens for 'z'
E           IFS='z'
E           word='az'
E           argv.py ${word}zb
E           argv.py ${word}z
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::Bug #1664, \\\\ with noglob[L452]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946dbb0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='Bug #1664, \\\\ with noglob', script='# Note that we\'re not changing IFS\n\nargv.py [\\\\]_\nargv.py "...="['[\\\\]_']\n['[\\\\]_']\nnoglob\n['[\\\\]_']\n['[\\\\]_']", shells=None, variant=None)], line_number=452, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Bug #1664, \\ with noglob (line 452)
E           
E           stdout mismatch:
E             expected: "['[\\\\]_']\n['[\\\\]_']\nnoglob\n['[\\\\]_']\n['[\\\\]_']"
E             actual:   "['[]_']\n['[\\\\]_']\nnoglob\n['[]_']\n['[\\\\]_']"
E           
E           Expected stdout: "['[\\\\]_']\n['[\\\\]_']\nnoglob\n['[\\\\]_']\n['[\\\\]_']"
E           Actual stdout:   "['[]_']\n['[\\\\]_']\nnoglob\n['[]_']\n['[\\\\]_']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: set: -f: invalid option\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Note that we're not changing IFS
E           
E           argv.py [\\]_
E           argv.py "[\\]_"
E           
E           # TODO: no difference observed here, go back to original bug
E           
E           #argv.py [\\_
E           #argv.py "[\\_"
E           
E           echo noglob
E           
E           # repeat cases with -f, noglob
E           set -f
E           
E           argv.py [\\]_
E           argv.py "[\\]_"
E           
E           #argv.py [\\_
E           #argv.py "[\\_"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::Empty IFS bug #2141 (from pnut)[L484]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946dc70>
test_file = 'word-split.test.sh'
test_case = TestCase(name='Empty IFS bug #2141 (from pnut)', script='res=0\nsum() {\n  # implement callee-save calling convention ...= 42\n12 + 30 = 42\n12 + 30 = 42\n12 + 30 = 42\n12 + 30 = 42', shells=None, variant=None)], line_number=484, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Empty IFS bug #2141 (from pnut) (line 484)
E           
E           stdout mismatch:
E             expected: '12 + 30 = 42\n12 + 30 = 42\n12 + 30 = 42\n12 + 30 = 42\n12 + 30 = 42'
E             actual:   '12 + 30 = 42\n12 + 30 = 42\n12 30 + 0 = 0\n12 30 +  = 0'
E           
E           Expected stdout: '12 + 30 = 42\n12 + 30 = 42\n12 + 30 = 42\n12 + 30 = 42\n12 + 30 = 42'
E           Actual stdout:   '12 + 30 = 42\n12 + 30 = 42\n12 30 + 0 = 0\n12 30 +  = 0\n'
E           Expected stderr: None
E           Actual stderr:   'bash: 2: unbound variable\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           res=0
E           sum() {
E             # implement callee-save calling convention using `set`
E             # here, we save the value of $res after the function parameters
E             set $@ $res           # $1 $2 $3 are now set
E             res=$(($1 + $2))
E             echo "$1 + $2 = $res"
E             res=$3                # restore the value of $res
E           }
E           
E           unset IFS
E           sum 12 30 # outputs "12 + 30 = 42"
E           
E           IFS=' '
E           sum 12 30 # outputs "12 + 30 = 42"
E           
E           IFS=
E           sum 12 30 # outputs "1230 + 0 = 1230"
E           
E           # I added this
E           IFS=''
E           sum 12 30
E           
E           set -u
E           IFS=
E           sum 12 30 # fails with "fatal: Undefined variable '2'" on res=$(($1 + $2))
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::4 x 3 table: (default IFS, IFS='', IFS=zx) x ( $* "$*" $@ "$@" )[L542]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946ddf0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='4 x 3 table: (default IFS, IFS=\'\', IFS=zx) x ( $* "$*" $@ "$@" )', script='setopt SH_WORD_SPLIT  # fo...a b\', \'c\', \'\']\n[\' "$@" \', \'a b\', \'c\', \'\']', shells=['yash'], variant='BUG')], line_number=542, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 4 x 3 table: (default IFS, IFS='', IFS=zx) x ( $* "$*" $@ "$@" ) (line 542)
E           
E           stdout mismatch:
E             expected: '[\'  $*  \', \'a\', \'b\', \'c\']\n[\' "$*" \', \'a b c \']\n[\'  $@  \', \'a\', \'b\', \'c\']\n[\' "$@" \', \'a b\', \'c\', \'\']\n\n[\'  $*  \', \'a b\', \'c\']\n[\' "$*" \', \'a bc\']\n[\'  $@  \', \'a b\', \'c\']\n[\' "$@" \', \'a b\', \'c\', \'\']\n\n[\'  $*  \', \'a b\', \'c\']\n[\' "$*" \', \'a bzcz\']\n[\'  $@  \', \'a b\', \'c\']\n[\' "$@" \', \'a b\', \'c\', \'\']'
E             actual:   '[\'  $*  \', \'a\', \'b\', \'c\']\n[\' "$*" \', \'a b c \']\n[\'  $@  \', \'a\', \'b\', \'c\']\n[\' "$@" \', \'a b\', \'c\', \'\']\n\n[\'  $*  \', \'a bc\']\n[\' "$*" \', \'a bc\']\n[\'  $@  \', \'a b c \']\n[\' "$@" \', \'a b\', \'c\', \'\']\n\n[\'  $*  \', \'a b\', \'c\']\n[\' "$*" \', \'a bzcz\']\n[\'  $@  \', \'a b c \']\n[\' "$@" \', \'a b\', \'c\', \'\']'
E           
E           Expected stdout: '[\'  $*  \', \'a\', \'b\', \'c\']\n[\' "$*" \', \'a b c \']\n[\'  $@  \', \'a\', \'b\', \'c\']\n[\' "$@" \', \'a b\', \'c\', \'\']\n\n[\'  $*  \', \'a b\', \'c\']\n[\' "$*" \', \'a bc\']\n[\'  $@  \', \'a b\', \'c\']\n[\' "$@" \', \'a b\', \'c\', \'\']\n\n[\'  $*  \', \'a b\', \'c\']\n[\' "$*" \', \'a bzcz\']\n[\'  $@  \', \'a b\', \'c\']\n[\' "$@" \', \'a b\', \'c\', \'\']'
E           Actual stdout:   '[\'  $*  \', \'a\', \'b\', \'c\']\n[\' "$*" \', \'a b c \']\n[\'  $@  \', \'a\', \'b\', \'c\']\n[\' "$@" \', \'a b\', \'c\', \'\']\n\n[\'  $*  \', \'a bc\']\n[\' "$*" \', \'a bc\']\n[\'  $@  \', \'a b c \']\n[\' "$@" \', \'a b\', \'c\', \'\']\n\n[\'  $*  \', \'a b\', \'c\']\n[\' "$*" \', \'a bzcz\']\n[\'  $@  \', \'a b c \']\n[\' "$@" \', \'a b\', \'c\', \'\']\n'
E           Expected stderr: None
E           Actual stderr:   'bash: setopt: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           setopt SH_WORD_SPLIT  # for zsh
E           
E           set -- 'a b' c ''
E           
E           # default IFS
E           argv.py '  $*  '  $*
E           argv.py ' "$*" ' "$*"
E           argv.py '  $@  '  $@
E           argv.py ' "$@" ' "$@"
E           echo
E           
E           IFS=''
E           argv.py '  $*  '  $*
E           argv.py ' "$*" ' "$*"
E           argv.py '  $@  '  $@
E           argv.py ' "$@" ' "$@"
E           echo
E           
E           IFS=zx
E           argv.py '  $*  '  $*
E           argv.py ' "$*" ' "$*"
E           argv.py '  $@  '  $@
E           argv.py ' "$@" ' "$@"
E           
E           
E           # zsh disagrees on
E           # - $@ with default IFS an
E           # - $@ with IFS=zx
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::4 x 3 table - with for loop[L623]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946deb0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='4 x 3 table - with for loop', script='case $SH in yash) exit ;; esac  # no echo -n\n\nsetopt SH_WORD_SP...s=None, variant=None), Assertion(type='stdout', value='', shells=['yash'], variant='N-I')], line_number=623, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: 4 x 3 table - with for loop (line 623)
E           
E           stdout mismatch:
E             expected: '  $*   -a- -b- -c-\n "$*"  -a b c -\n  $@   -a- -b- -c-\n "$@"  -a b- -c- --\n\n  $*   -a b- -c-\n "$*"  -a bc-\n  $@   -a b- -c-\n "$@"  -a b- -c- --\n\n  $*   -a b- -c-\n "$*"  -a b c -\n  $@   -a b- -c-\n "$@"  -a b- -c- --'
E             actual:   '  $*   -a- -b- -c-\n "$*"  -a b c -\n  $@   -a- -b- -c-\n "$@"  -a b- -c- --\n\n  $*   -a bc-\n "$*"  -a bc-\n  $@   -a b c -\n "$@"  -a b- -c- --\n\n  $*   -a b- -c-\n "$*"  -a b c -\n  $@   -a b c -\n "$@"  -a b- -c- --'
E           
E           Expected stdout: '  $*   -a- -b- -c-\n "$*"  -a b c -\n  $@   -a- -b- -c-\n "$@"  -a b- -c- --\n\n  $*   -a b- -c-\n "$*"  -a bc-\n  $@   -a b- -c-\n "$@"  -a b- -c- --\n\n  $*   -a b- -c-\n "$*"  -a b c -\n  $@   -a b- -c-\n "$@"  -a b- -c- --'
E           Actual stdout:   '  $*   -a- -b- -c-\n "$*"  -a b c -\n  $@   -a- -b- -c-\n "$@"  -a b- -c- --\n\n  $*   -a bc-\n "$*"  -a bc-\n  $@   -a b c -\n "$@"  -a b- -c- --\n\n  $*   -a b- -c-\n "$*"  -a b c -\n  $@   -a b c -\n "$@"  -a b- -c- --\n'
E           Expected stderr: None
E           Actual stderr:   'bash: setopt: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in yash) exit ;; esac  # no echo -n
E           
E           setopt SH_WORD_SPLIT  # for zsh
E           
E           set -- 'a b' c ''
E           
E           # default IFS
E           echo -n '  $*  ';  for i in  $*;  do echo -n ' '; echo -n -$i-; done; echo
E           echo -n ' "$*" ';  for i in "$*"; do echo -n ' '; echo -n -$i-; done; echo
E           echo -n '  $@  ';  for i in  $@;  do echo -n ' '; echo -n -$i-; done; echo
E           echo -n ' "$@" ';  for i in "$@"; do echo -n ' '; echo -n -$i-; done; echo
E           echo
E           
E           IFS=''
E           echo -n '  $*  ';  for i in  $*;  do echo -n ' '; echo -n -$i-; done; echo
E           echo -n ' "$*" ';  for i in "$*"; do echo -n ' '; echo -n -$i-; done; echo
E           echo -n '  $@  ';  for i in  $@;  do echo -n ' '; echo -n -$i-; done; echo
E           echo -n ' "$@" ';  for i in "$@"; do echo -n ' '; echo -n -$i-; done; echo
E           echo
E           
E           IFS=zx
E           echo -n '  $*  ';  for i in  $*;  do echo -n ' '; echo -n -$i-; done; echo
E           echo -n ' "$*" ';  for i in "$*"; do echo -n ' '; echo -n -$i-; done; echo
E           echo -n '  $@  ';  for i in  $@;  do echo -n ' '; echo -n -$i-; done; echo
E           echo -n ' "$@" ';  for i in "$@"; do echo -n ' '; echo -n -$i-; done; echo
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS=x and '' and $@ - same bug as spec/toysh-posix case #12[L670]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946df70>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS=x and '' and $@ - same bug as spec/toysh-posix case #12", script='case $SH in yash) exit ;; esac  #...s=None, variant=None), Assertion(type='stdout', value='', shells=['yash'], variant='N-I')], line_number=670, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS=x and '' and $@ - same bug as spec/toysh-posix case #12 (line 670)
E           
E           stdout mismatch:
E             expected: '  $*   -one- -- -two-\n "$*"  -one  two-\n  $@   -one- -- -two-\n "$@"  -one- -- -two-\n[\'  $*  \', \'one\', \'\', \'two\']\n[\' "$*" \', \'onezztwo\']\n[\'  $@  \', \'one\', \'\', \'two\']\n[\' "$@" \', \'one\', \'\', \'two\']'
E             actual:   '  $*   -one- -- -two-\n "$*"  -one  two-\n  $@   -one  two-\n "$@"  -one- -- -two-\n[\'  $*  \', \'one\', \'\', \'two\']\n[\' "$*" \', \'onezztwo\']\n[\'  $@  \', \'one  two\']\n[\' "$@" \', \'one\', \'\', \'two\']'
E           
E           Expected stdout: '  $*   -one- -- -two-\n "$*"  -one  two-\n  $@   -one- -- -two-\n "$@"  -one- -- -two-\n[\'  $*  \', \'one\', \'\', \'two\']\n[\' "$*" \', \'onezztwo\']\n[\'  $@  \', \'one\', \'\', \'two\']\n[\' "$@" \', \'one\', \'\', \'two\']'
E           Actual stdout:   '  $*   -one- -- -two-\n "$*"  -one  two-\n  $@   -one  two-\n "$@"  -one- -- -two-\n[\'  $*  \', \'one\', \'\', \'two\']\n[\' "$*" \', \'onezztwo\']\n[\'  $@  \', \'one  two\']\n[\' "$@" \', \'one\', \'\', \'two\']\n'
E           Expected stderr: None
E           Actual stderr:   'bash: setopt: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in yash) exit ;; esac  # no echo -n
E           
E           setopt SH_WORD_SPLIT  # for zsh
E           
E           set -- one '' two
E           
E           IFS=zx
E           echo -n '  $*  ';  for i in  $*;  do echo -n ' '; echo -n -$i-; done; echo
E           echo -n ' "$*" ';  for i in "$*"; do echo -n ' '; echo -n -$i-; done; echo
E           echo -n '  $@  ';  for i in  $@;  do echo -n ' '; echo -n -$i-; done; echo
E           echo -n ' "$@" ';  for i in "$@"; do echo -n ' '; echo -n -$i-; done; echo
E           
E           argv.py '  $*  '  $*
E           argv.py ' "$*" ' "$*"
E           argv.py '  $@  '  $@
E           argv.py ' "$@" ' "$@"
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::IFS=x and '' and $@ (#2)[L714]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946e030>
test_file = 'word-split.test.sh'
test_case = TestCase(name="IFS=x and '' and $@ (#2)", script='setopt SH_WORD_SPLIT  # for zsh\n\nset -- "" "" "" "" ""\nargv.py =$... '=']\n\n['=', '', '', '', '=']\n['=', '', '', '', '=']", shells=['yash'], variant='BUG')], line_number=714, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: IFS=x and '' and $@ (#2) (line 714)
E           
E           stdout mismatch:
E             expected: "['=', '=']\n['=', '=']\n\n['=', '=']\n['=', '=']\n\n['=', '', '', '', '=']\n['=', '', '', '', '=']"
E             actual:   "['=', '=']\n['=', '=']\n\n['=    =']\n['==']\n\n['=    =']\n['=', '', '', '', '=']"
E           
E           Expected stdout: "['=', '=']\n['=', '=']\n\n['=', '=']\n['=', '=']\n\n['=', '', '', '', '=']\n['=', '', '', '', '=']"
E           Actual stdout:   "['=', '=']\n['=', '=']\n\n['=    =']\n['==']\n\n['=    =']\n['=', '', '', '', '=']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: setopt: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           setopt SH_WORD_SPLIT  # for zsh
E           
E           set -- "" "" "" "" ""
E           argv.py =$@=
E           argv.py =$*=
E           echo
E           
E           IFS=
E           argv.py =$@=
E           argv.py =$*=
E           echo
E           
E           IFS=x
E           argv.py =$@=
E           argv.py =$*=
E           
E           
E           
E           # yash-2.49 seems to behave in a strange way, but this behavior seems to have
E           # been fixed at least in yash-2.57.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::""$A"" - empty string on both sides - derived from spec/toysh-posix #15[L815]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946e1b0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='""$A"" - empty string on both sides - derived from spec/toysh-posix #15', script='A="   abc   def   "\n...==\n=abc=\n=def=\n==\n\n=abc=\n=def=\n\n==\n=abc=\n=def=\n==", shells=None, variant=None)], line_number=815, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: ""$A"" - empty string on both sides - derived from spec/toysh-posix #15 (line 815)
E           
E           stdout mismatch:
E             expected: "['abc', 'def']\n['', 'abc', 'def', '']\n['abc', 'def']\n['', 'abc', 'def', '']\n\n=abc=\n=def=\n\n==\n=abc=\n=def=\n==\n\n=abc=\n=def=\n\n==\n=abc=\n=def=\n=="
E             actual:   "['abc', 'def']\n['   abc   def   ']\n['abc', 'def']\n['   abc   def   ']\n\n=abc=\n=def=\n\n= abc def =\n\n=abc=\n=def=\n\n= abc def ="
E           
E           Expected stdout: "['abc', 'def']\n['', 'abc', 'def', '']\n['abc', 'def']\n['', 'abc', 'def', '']\n\n=abc=\n=def=\n\n==\n=abc=\n=def=\n==\n\n=abc=\n=def=\n\n==\n=abc=\n=def=\n=="
E           Actual stdout:   "['abc', 'def']\n['   abc   def   ']\n['abc', 'def']\n['   abc   def   ']\n\n=abc=\n=def=\n\n= abc def =\n\n=abc=\n=def=\n\n= abc def =\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           A="   abc   def   "
E           
E           argv.py $A
E           argv.py ""$A""
E           
E           unset IFS
E           
E           argv.py $A
E           argv.py ""$A""
E           
E           echo
E           
E           # Do the same thing in a for loop - this is IDENTICAL behavior
E           
E           for i in $A; do echo =$i=; done
E           echo
E           
E           for i in ""$A""; do echo =$i=; done
E           echo
E           
E           unset IFS
E           
E           for i in $A; do echo =$i=; done
E           echo
E           
E           for i in ""$A""; do echo =$i=; done
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[word-split.test.sh::Regression: "${v:-AxBxC}"x should not be split[L894]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946e3f0>
test_file = 'word-split.test.sh'
test_case = TestCase(name='Regression: "${v:-AxBxC}"x should not be split', script='IFS=x\nv=\necho "${v:-AxBxC}"\necho "${v:-AxBx...='AxBxC\nAxBxCx\nA B C\nA B Cx\nAxBxC\nAxBxCx\nAxBxC\nAxBxCx', shells=None, variant=None)], line_number=894, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Regression: "${v:-AxBxC}"x should not be split (line 894)
E           
E           stdout mismatch:
E             expected: 'AxBxC\nAxBxCx\nA B C\nA B Cx\nAxBxC\nAxBxCx\nAxBxC\nAxBxCx'
E             actual:   'AxBxC\nAxBxCx\nA B C\nA B C\nA B C\nA B C\nAxBxC\nAxBxCx'
E           
E           Expected stdout: 'AxBxC\nAxBxCx\nA B C\nA B Cx\nAxBxC\nAxBxCx\nAxBxC\nAxBxCx'
E           Actual stdout:   'AxBxC\nAxBxCx\nA B C\nA B C\nA B C\nA B C\nAxBxC\nAxBxCx\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           IFS=x
E           v=
E           echo "${v:-AxBxC}"
E           echo "${v:-AxBxC}"x  # <-- osh failed this
E           echo ${v:-AxBxC}
E           echo ${v:-AxBxC}x
E           echo ${v:-"AxBxC"}
E           echo ${v:-"AxBxC"}x
E           echo "${v:-"AxBxC"}"
E           echo "${v:-"AxBxC"}"x
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[xtrace.test.sh::set -o verbose prints unevaluated code[L28]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946e570>
test_file = 'xtrace.test.sh'
test_case = TestCase(name='set -o verbose prints unevaluated code', script='set -o verbose\nx=foo\ny=bar\necho $x\necho $(echo $y)...e='stderr', value='x=foo\ny=bar\necho $x\necho $(echo $y)', shells=['bash'], variant='OK')], line_number=28, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: set -o verbose prints unevaluated code (line 28)
E           
E           stderr mismatch:
E             expected: 'x=foo\ny=bar\necho $x\necho $(echo $y)'
E             actual:   ''
E           
E           Expected stdout: 'foo\nbar'
E           Actual stdout:   'foo\nbar\n'
E           Expected stderr: 'x=foo\ny=bar\necho $x\necho $(echo $y)'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o verbose
E           x=foo
E           y=bar
E           echo $x
E           echo $(echo $y)
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[xtrace.test.sh::xtrace with unprintable chars[L49]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946e630>
test_file = 'xtrace.test.sh'
test_case = TestCase(name='xtrace with unprintable chars', script='case $SH in dash) exit ;; esac\n\n$SH >stdout 2>stderr <<\'EOF\...], variant='BUG'), Assertion(type='stdout-json', value='', shells=['dash'], variant='N-I')], line_number=49, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: xtrace with unprintable chars (line 49)
E           
E           stdout mismatch:
E             expected: "STDOUT\n   a 003   b 004   c  \\n\n  61  03  62  04  63  0a\n\nSTDERR\n+ echo $'a\\003b\\004c'"
E             actual:   'STDOUT\n\nSTDERR'
E           
E           Expected stdout: "STDOUT\n   a 003   b 004   c  \\n\n  61  03  62  04  63  0a\n\nSTDERR\n+ echo $'a\\003b\\004c'"
E           Actual stdout:   'STDOUT\n\nSTDERR\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           $SH >stdout 2>stderr <<'EOF'
E           
E           s=$'a\x03b\004c\x00d'
E           set -o xtrace
E           echo "$s"
E           EOF
E           
E           show_hex() { od -A n -t c -t x1; }
E           
E           echo STDOUT
E           cat stdout | show_hex
E           echo
E           
E           echo STDERR
E           grep 'echo' stderr
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[xtrace.test.sh::xtrace with variables in PS4[L297]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946edb0>
test_file = 'xtrace.test.sh'
test_case = TestCase(name='xtrace with variables in PS4', script="PS4='+$x:'\nset -o xtrace\nx=1\necho one\nx=2\necho two", assert...at's OK\n+1:x=1\n+1:echo one\n+2:x=2\n+2:echo two", shells=['osh', 'dash'], variant='OK')], line_number=297, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: xtrace with variables in PS4 (line 297)
E           
E           stderr mismatch:
E             expected: '+:x=1\n+1:echo one\n+1:x=2\n+2:echo two'
E             actual:   ''
E           
E           Expected stdout: 'one\ntwo'
E           Actual stdout:   'one\ntwo\n'
E           Expected stderr: '+:x=1\n+1:echo one\n+1:x=2\n+2:echo two'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           PS4='+$x:'
E           set -o xtrace
E           x=1
E           echo one
E           x=2
E           echo two
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[xtrace.test.sh::PS4 with unterminated ${[L332]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946ee70>
test_file = 'xtrace.test.sh'
test_case = TestCase(name='PS4 with unterminated ${', script="# osh shows inline error; maybe fail like dash/mksh?\nx=1\nPS4='+${x...['mksh'], variant='OK'), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=332, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: PS4 with unterminated ${ (line 332)
E           
E           stdout mismatch:
E             expected: 'one\nstatus=0'
E             actual:   ''
E           
E           Expected stdout: 'one\nstatus=0'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # osh shows inline error; maybe fail like dash/mksh?
E           x=1
E           PS4='+${x'
E           set -o xtrace
E           echo one
E           echo status=$?
E           # mksh and dash both fail.  bash prints errors to stderr.
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[xtrace.test.sh::PS4 with unterminated $([L349]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946ef30>
test_file = 'xtrace.test.sh'
test_case = TestCase(name='PS4 with unterminated $(', script="# osh shows inline error; maybe fail like dash/mksh?\nx=1\nPS4='+$(x...['mksh'], variant='OK'), Assertion(type='status', value=1, shells=['mksh'], variant='OK')], line_number=349, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: PS4 with unterminated $( (line 349)
E           
E           stdout mismatch:
E             expected: 'one\nstatus=0'
E             actual:   ''
E           
E           Expected stdout: 'one\nstatus=0'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # osh shows inline error; maybe fail like dash/mksh?
E           x=1
E           PS4='+$(x'
E           set -o xtrace
E           echo one
E           echo status=$?
E           # mksh and dash both fail.  bash prints errors to stderr.
E           ---

tests/spec_tests/test_spec.py:218: Failed
__ TestBashSpecTests.test_spec_case[xtrace.test.sh::Reading $? in PS4[L384]] ___

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946f0b0>
test_file = 'xtrace.test.sh'
test_case = TestCase(name='Reading $? in PS4', script="PS4='[last=$?] '\nset -x\nfalse\necho ok", assertions=[Assertion(type='stdo...(type='stderr', value="[last=0] 'false'\n[last=1] echo ok", shells=['osh'], variant='OK')], line_number=384, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: Reading $? in PS4 (line 384)
E           
E           stderr mismatch:
E             expected: '[last=0] false\n[last=1] echo ok'
E             actual:   ''
E           
E           Expected stdout: 'ok'
E           Actual stdout:   'ok\n'
E           Expected stderr: '[last=0] false\n[last=1] echo ok'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           PS4='[last=$?] '
E           set -x
E           false
E           echo ok
E           ---

tests/spec_tests/test_spec.py:218: Failed
_ TestBashSpecTests.test_spec_case[zsh-idioms.test.sh::zsh var sub is rejected at runtime[L27]] _

self = <spec_tests.test_spec.TestBashSpecTests object at 0x10946f470>
test_file = 'zsh-idioms.test.sh'
test_case = TestCase(name='zsh var sub is rejected at runtime', script='eval \'echo z ${(m)foo} z\'\necho status=$?\n\neval \'echo...mksh'], variant='BUG'), Assertion(type='stdout', value='', shells=['mksh'], variant='BUG')], line_number=27, skip=None)

    @pytest.mark.asyncio
    async def test_spec_case(self, test_file, test_case):
        """Run a single spec test case."""
        result = await run_test_case(test_case)
    
        if result.skipped:
            pytest.skip(result.skip_reason or "Skipped")
    
        if not result.passed:
>           pytest.fail(format_error(result))
E           Failed: Test: zsh var sub is rejected at runtime (line 27)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1\nstatus=1'
E             actual:   'z z\nstatus=0\n\nstatus=0\n\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1\nstatus=1'
E           Actual stdout:   'z z\nstatus=0\n\nstatus=0\n\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           eval 'echo z ${(m)foo} z'
E           echo status=$?
E           
E           eval 'echo ${x:-${(m)foo}}'
E           echo status=$?
E           
E           # double quoted
E           eval 'echo "${(m)foo}"'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:218: Failed
______________________ test_bash_spec_file[alias.test.sh] ______________________

test_file = 'alias.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 19 passed, 29 failed, 0 skipped
E           ============================================================
E           
E           Test: Usage of builtins (line 10)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: 'status=0\nx'
E           Actual stdout:   'status=0\nx\n'
E           Expected stderr: None
E           Actual stderr:   'bash: foo: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           shopt -s expand_aliases || true
E           alias -- foo=echo
E           echo status=$?
E           foo x
E           unalias -- foo
E           foo x
E           # dash doesn't accept --
E           ---
E           
E           ---
E           
E           Test: define and use alias on a single line (line 40)
E           
E           stdout mismatch:
E             expected: 'two\nthree'
E             actual:   'one\ntwo\nthree'
E           
E           Expected stdout: 'two\nthree'
E           Actual stdout:   'one\ntwo\nthree\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases
E           alias e=echo; e one  # this is not alias-expanded because we parse lines at once
E           e two; e three
E           ---
E           
E           ---
E           
E           Test: defining multiple aliases, then unalias (line 56)
E           
E           stdout mismatch:
E             expected: 'status=0\nx X\ny Y\nstatus=0\nundefined\nundefined'
E             actual:   'status=0\n$x X\n$y Y\nstatus=0\nundefined\nundefined'
E           
E           Expected stdout: 'status=0\nx X\ny Y\nstatus=0\nundefined\nundefined'
E           Actual stdout:   'status=0\n$x X\n$y Y\nstatus=0\nundefined\nundefined\n'
E           Expected stderr: None
E           Actual stderr:   'bash: echo-x: command not found\nbash: echo-y: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s expand_aliases  # bash requires this
E           x=x
E           y=y
E           alias echo-x='echo $x' echo-y='echo $y'
E           echo status=$?
E           echo-x X
E           echo-y Y
E           unalias echo-x echo-y
E           echo status=$?
E           echo-x X || echo undefined
E           echo-y Y || echo undefined
E           ---
E           
E           ... and 26 more failures

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[append.test.sh] ______________________

test_file = 'append.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 8 passed, 11 failed, 1 skipped
E           ============================================================
E           
E           Test: error: s+=(my array) (line 45)
E           
E           stdout mismatch:
E             expected: "['abc', 'd', 'e', 'f']"
E             actual:   "['d', 'e', 'f']"
E           
E           Expected stdout: "['abc', 'd', 'e', 'f']"
E           Actual stdout:   "['d', 'e', 'f']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           s='abc'
E           s+=(d e f)
E           argv.py "${s[@]}"
E           ---
E           
E           ---
E           
E           Test: error: myarray+=s (line 54)
E           
E           stdout mismatch:
E             expected: "['xz', 'y']"
E             actual:   "['x', 'y']"
E           
E           Expected stdout: "['xz', 'y']"
E           Actual stdout:   "['x', 'y']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # They treat this as implicit index 0.  We disallow this on the LHS, so we will
E           # also disallow it on the RHS.
E           a=(x y )
E           a+=z
E           argv.py "${a[@]}"
E           ---
E           
E           ---
E           
E           Test: typeset s+=(my array) (line 68)
E           
E           stdout mismatch:
E             expected: "abc\nstatus=0\n['abc', 'd', 'e', 'f']"
E             actual:   'abc\nstatus=1\n[]'
E           
E           Expected stdout: "abc\nstatus=0\n['abc', 'd', 'e', 'f']"
E           Actual stdout:   'abc\nstatus=1\n[]\n'
E           Expected stderr: None
E           Actual stderr:   "bash: declare: `s+=(d e f)': not a valid identifier\n"
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset s='abc'
E           echo $s
E           
E           typeset s+=(d e f)
E           echo status=$?
E           argv.py "${s[@]}"
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[arg-parse.test.sh] ____________________

test_file = 'arg-parse.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 2 failed, 0 skipped
E           ============================================================
E           
E           Test: shift 1 extra (line 13)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'fail'
E           
E           Expected stdout: ''
E           Actual stdout:   'fail\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c '
E           set -- a b c
E           shift 1 extra
E           '
E           if test $? -eq 0; then
E             echo fail
E           fi
E           ---
E           
E           ---
E           
E           Test: continue 1 extra, break, etc. (line 29)
E           
E           Execution error: Expected 'done' to close for loop at line 1, column 17
E           
E           
E           Script:
E           ---
E           $SH -c '
E           for i in foo; do
E             continue 1 extra
E           done
E           echo status=$?
E           '
E           if test $? -eq 0; then
E             echo fail
E           fi
E           ---

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[arith-context.test.sh] __________________

test_file = 'arith-context.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 5 passed, 11 failed, 0 skipped
E           ============================================================
E           
E           Test: Multiple right brackets inside expression (line 12)
E           
E           stdout mismatch:
E             expected: '2 3'
E             actual:   '1 1'
E           
E           Expected stdout: '2 3'
E           Actual stdout:   '1 1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           echo ${a[a[0]]} ${a[a[a[0]]]}
E           ---
E           
E           ---
E           
E           Test: Slicing of string with variables (line 31)
E           
E           stdout mismatch:
E             expected: 'abcd abcd b'
E             actual:   ''
E           
E           Expected stdout: 'abcd abcd b'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s='abcd'
E           zero=0
E           one=1
E           echo ${s:$zero} ${s:$zero:4} ${s:$one:$one}
E           ---
E           
E           ---
E           
E           Test: Array index on LHS of assignment (line 38)
E           
E           stdout mismatch:
E             expected: '1 X 3'
E             actual:   '1 2 3'
E           
E           Expected stdout: '1 X 3'
E           Actual stdout:   '1 2 3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(1 2 3)
E           zero=0
E           a[zero+5-4]=X
E           echo ${a[@]}
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[arith-dynamic.test.sh] __________________

test_file = 'arith-dynamic.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: Double quotes (line 6)
E           
E           stdout mismatch:
E             expected: '7\n7'
E             actual:   '0\n0'
E           
E           Expected stdout: '7\n7'
E           Actual stdout:   '0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $(( "1 + 2" * 3 ))
E           echo $(( "1+2" * 3 ))
E           ---
E           
E           ---
E           
E           Test: Single quotes (line 26)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   '0\nstatus=0\n0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   '0\nstatus=0\n0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $(( '1' + '2' * 3 ))
E           echo status=$?
E           
E           echo $(( '1 + 2' * 3 ))
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: Substitutions (line 51)
E           
E           stdout mismatch:
E             expected: '7\n7'
E             actual:   '0\n0'
E           
E           Expected stdout: '7\n7'
E           Actual stdout:   '0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='1 + 2'
E           echo $(( $x * 3 ))
E           echo $(( "$x" * 3 ))
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[arith.test.sh] ______________________

test_file = 'arith.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 46 passed, 26 failed, 2 skipped
E           ============================================================
E           
E           Test: Side Effect in Array Indexing (line 17)
E           
E           stdout mismatch:
E             expected: '6 b=2'
E             actual:   '4 b='
E           
E           Expected stdout: '6 b=2'
E           Actual stdout:   '4 b=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(4 5 6)
E           echo "${a[b=2]} b=$b"
E           ---
E           
E           ---
E           
E           Test: Arith sub with word parts (line 48)
E           
E           stdout mismatch:
E             expected: '14'
E             actual:   '1'
E           
E           Expected stdout: '14'
E           Actual stdout:   '1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Making 13 from two different kinds of sub.  Geez.
E           echo $((1 + $(echo 1)${undefined:-3}))
E           ---
E           
E           ---
E           
E           Test: Backticks within arith sub (line 66)
E           
E           stdout mismatch:
E             expected: '3'
E             actual:   '2'
E           
E           Expected stdout: '3'
E           Actual stdout:   '2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is unnecessary but works in all shells.
E           echo $((`echo 1` + 2))
E           ---
E           
E           ... and 23 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[array-assign.test.sh] ___________________

test_file = 'array-assign.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 11 failed, 0 skipped
E           ============================================================
E           
E           Test: Indexed LHS without spaces, and += (line 6)
E           
E           stdout mismatch:
E             expected: "status=0\n['x']\n['x', 'y']\n['x', 'yz']"
E             actual:   "status=0\n['x']\n['x']\n['x']"
E           
E           Expected stdout: "status=0\n['x']\n['x', 'y']\n['x', 'yz']"
E           Actual stdout:   "status=0\n['x']\n['x']\n['x']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a[1]=x
E           echo status=$?
E           argv.py "${a[@]}"
E           
E           a[0+2]=y
E           #a[2|3]=y  # zsh doesn't allow this
E           argv.py "${a[@]}"
E           
E           # += does appending
E           a[0+2]+=z
E           argv.py "${a[@]}"
E           ---
E           
E           ---
E           
E           Test: Indexed LHS with spaces (line 31)
E           
E           stdout mismatch:
E             expected: "status=0\n['x', 'z']"
E             actual:   'status=1\n[]'
E           
E           Expected stdout: "status=0\n['x', 'z']"
E           Actual stdout:   'status=1\n[]\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[1: command not found\nbash: a[: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh|ash) exit ;; esac
E           
E           a[1 * 1]=x
E           a[ 1 + 2 ]=z
E           echo status=$?
E           
E           argv.py "${a[@]}"
E           ---
E           
E           ---
E           
E           Test: Nested a[i[0]]=0 (line 46)
E           
E           stdout mismatch:
E             expected: "['0', '1', '2', '3']"
E             actual:   '[]'
E           
E           Expected stdout: "['0', '1', '2', '3']"
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   'bash: a[: command not found\nbash: a[: command not found\nbash: a[: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=(0 1 2)
E           
E           a[i[0]]=0
E           a[ i[1] ]=1
E           a[ i[2] ]=2
E           a[ i[1]+i[2] ]=3
E           
E           argv.py "${a[@]}"
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[array-assoc.test.sh] ___________________

test_file = 'array-assoc.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 15 passed, 26 failed, 1 skipped
E           ============================================================
E           
E           Test: Literal syntax ([x]=y) (line 19)
E           
E           stdout mismatch:
E             expected: 'b\nbar\nc'
E             actual:   ''
E           
E           Expected stdout: 'b\nbar\nc'
E           Actual stdout:   '\n\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           a=([aa]=b [foo]=bar ['a+1']=c)
E           echo ${a["aa"]}
E           echo ${a["foo"]}
E           echo ${a["a+1"]}
E           ---
E           
E           ---
E           
E           Test: Can initialize assoc array with the "(key value ...)" sequence (line 50)
E           
E           stdout mismatch:
E             expected: 'status=0\ndeclare -A A=()'
E             actual:   'status=0\ndeclare -A A=([0]="1" [1]="2" [2]="3")'
E           
E           Expected stdout: 'status=0\ndeclare -A A=()'
E           Actual stdout:   'status=0\ndeclare -A A=([0]="1" [1]="2" [2]="3")\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A=(1 2 3)
E           echo status=$?
E           declare -p A
E           
E           # bash-4.4 prints warnings to stderr but gives no indication of the problem
E           ---
E           
E           ---
E           
E           Test: retrieve keys with ! (line 84)
E           
E           stdout mismatch:
E             expected: 'a+1\nfoo\nx'
E             actual:   '"$var"\n\'a+1\'\n\'foo\''
E           
E           Expected stdout: 'a+1\nfoo\nx'
E           Actual stdout:   '"$var"\n\'a+1\'\n\'foo\'\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A a
E           var='x'
E           a["$var"]=b
E           a['foo']=bar
E           a['a+1']=c
E           for key in "${!a[@]}"; do
E             echo $key
E           done | sort
E           ---
E           
E           ... and 23 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[array-compat.test.sh] ___________________

test_file = 'array-compat.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 8 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: User arrays decay (line 23)
E           
E           stdout mismatch:
E             expected: "['x', 'y', 'z']\n['x y z']\n['x', 'YYY', 'z']"
E             actual:   "['x', 'y', 'z']\n['x y z']\n['x y z', 'YYY']"
E           
E           Expected stdout: "['x', 'y', 'z']\n['x y z']\n['x', 'YYY', 'z']"
E           Actual stdout:   "['x', 'y', 'z']\n['x y z']\n['x y z', 'YYY']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a a b
E           a=(x y z)
E           b="${a[@]}"  # this collapses to a string
E           c=("${a[@]}")  # this preserves the array
E           c[1]=YYY  # mutate a copy -- doesn't affect the original
E           argv.py "${a[@]}"
E           argv.py "${b}"
E           argv.py "${c[@]}"
E           ---
E           
E           ---
E           
E           Test: ++ on a whole array increments the first element (disallowed with strict_array) (line 84)
E           
E           stdout mismatch:
E             expected: '2 10'
E             actual:   '1 10'
E           
E           Expected stdout: '2 10'
E           Actual stdout:   '1 10\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s strict_array
E           
E           a=(1 10)
E           (( a++ ))  # doesn't make sense
E           echo "${a[@]}"
E           ---
E           
E           ---
E           
E           Test: value.BashArray internal representation - Indexed (line 106)
E           
E           stdout mismatch:
E             expected: 'declare -a z=()\ndeclare -a z=([0]="b" [1]="c")\ndeclare -a z=([0]="b" [1]="c" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b-mystr" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b-mystr" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\n[\'keys\', \'0\', \'1\', \'5\', \'6\', \'7\']\n[\'values\', \'b-mystr\', \'ZZZ-append\', \'d\', \'f\', \'g\']\nstatus=1'
E             actual:   'declare -- z=""\ndeclare -a z=([0]="b" [1]="c")\ndeclare -a z=([0]="b" [1]="c" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\n[\'keys\', \'0\', \'1\', \'5\', \'6\', \'7\']\n[\'values\', \'b\', \'ZZZ-append\', \'d\', \'f\', \'g\']\ndeclare -a z=([0]="b" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\nstatus=0'
E           
E           Expected stdout: 'declare -a z=()\ndeclare -a z=([0]="b" [1]="c")\ndeclare -a z=([0]="b" [1]="c" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b-mystr" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b-mystr" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\n[\'keys\', \'0\', \'1\', \'5\', \'6\', \'7\']\n[\'values\', \'b-mystr\', \'ZZZ-append\', \'d\', \'f\', \'g\']\nstatus=1'
E           Actual stdout:   'declare -- z=""\ndeclare -a z=([0]="b" [1]="c")\ndeclare -a z=([0]="b" [1]="c" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b" [1]="ZZZ" [5]="d" [6]="f" [7]="g")\ndeclare -a z=([0]="b" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\n[\'keys\', \'0\', \'1\', \'5\', \'6\', \'7\']\n[\'values\', \'b\', \'ZZZ-append\', \'d\', \'f\', \'g\']\ndeclare -a z=([0]="b" [1]="ZZZ-append" [5]="d" [6]="f" [7]="g")\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           z=()
E           declare -a | grep z=
E           
E           z+=(b c)
E           declare -a | grep z=
E           
E           # z[5]= finds the index, or puts it in SORTED order I think
E           z[5]=d
E           declare -a | grep z=
E           
E           z[1]=ZZZ
E           declare -a | grep z=
E           
E           # Adds after last index
E           z+=(f g)
E           declare -a | grep z=
E           
E           # This is the equivalent of z[0]+=mystr
E           z+=-mystr
E           declare -a | grep z=
E           
E           z[1]+=-append
E           declare -a | grep z=
E           
E           argv.py keys "${!z[@]}"  # 0 1 5 6 7
E           argv.py values "${z[@]}"
E           
E           # can't do this conversion
E           declare -A z
E           declare -A | grep z=
E           
E           echo status=$?
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[array-literal.test.sh] __________________

test_file = 'array-literal.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 2 passed, 14 failed, 3 skipped
E           ============================================================
E           
E           Test: Tilde expansions in RHS of [k]=v (BashArray) (line 3)
E           
E           stdout mismatch:
E             expected: '/home/user\n/home/user:/home/user:/home/user'
E             actual:   '~\n~:~:~'
E           
E           Expected stdout: '/home/user\n/home/user:/home/user:/home/user'
E           Actual stdout:   '~\n~:~:~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           HOME=/home/user
E           a=([2]=~ [4]=~:~:~)
E           echo "${a[2]}"
E           echo "${a[4]}"
E           ---
E           
E           ---
E           
E           Test: [k]=$v and [k]="$@" (BashArray) (line 44)
E           
E           stdout mismatch:
E             expected: "keys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '1 2 3']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']"
E             actual:   "keys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']"
E           
E           Expected stdout: "keys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '1 2 3']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']\nkeys: ['0', '1', '2', '5']\nvals: ['1', '2', '3', '3 5 7']"
E           Actual stdout:   "keys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\nkeys: ['0', 'i']\nvals: ['1 2 3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=5
E           v='1 2 3'
E           a=($v [i]=$v)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           
E           x=(3 5 7)
E           a=($v [i]="${x[*]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=($v [i]="${x[@]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=($v [i]=${x[*]})
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=($v [i]=${x[@]})
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---
E           
E           ---
E           
E           Test: [k]=$v and [k]="$@" (BashAssoc) (line 77)
E           
E           stdout mismatch:
E             expected: "keys: ['i']\nvals: ['1 2 3']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']"
E             actual:   "keys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []"
E           
E           Expected stdout: "keys: ['i']\nvals: ['1 2 3']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']\nkeys: ['i']\nvals: ['3 5 7']"
E           Actual stdout:   "keys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\nkeys: ['i']\nvals: []\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=5
E           v='1 2 3'
E           declare -A a
E           a=([i]=$v)
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           
E           x=(3 5 7)
E           a=([i]="${x[*]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=([i]="${x[@]}")
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=([i]=${x[*]})
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           a=([i]=${x[@]})
E           printf 'keys: '; argv.py "${!a[@]}"
E           printf 'vals: '; argv.py "${a[@]}"
E           ---
E           
E           ... and 11 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[array-sparse.test.sh] ___________________

test_file = 'array-sparse.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 8 passed, 24 failed, 8 skipped
E           ============================================================
E           
E           Test: test "declare -p sp" (line 85)
E           
E           stdout mismatch:
E             expected: 'declare -a a0=()\ndeclare -a a1=([0]="1")\ndeclare -a a2=([0]="1" [1]="2")\ndeclare -a a=([0]="x" [1]="y" [2]="z" [3]="w" [500]="100" [1000]="100")'
E             actual:   ''
E           
E           Expected stdout: 'declare -a a0=()\ndeclare -a a1=([0]="1")\ndeclare -a a2=([0]="1" [1]="2")\ndeclare -a a=([0]="x" [1]="y" [2]="z" [3]="w" [500]="100" [1000]="100")'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a0=()
E           a1=(1)
E           a2=(1 2)
E           a=(x y z w)
E           a[500]=100
E           a[1000]=100
E           
E           case $SH in
E           bash|mksh)
E             typeset -p a0 a1 a2 a
E             exit ;;
E           esac
E           
E           declare -p a0 a1 a2 a
E           ---
E           
E           ---
E           
E           Test: Negative index with a[i]=v (line 188)
E           
E           stdout mismatch:
E             expected: 'declare -a sp1=([9]=x)\ndeclare -a sp1=([0]=D [2]=C [6]=B [9]=A)'
E             actual:   'declare -a sp1=([9]=x)\ndeclare -a sp1=([-10]=D [-8]=C [-4]=B [-1]=A [9]=x)'
E           
E           Expected stdout: 'declare -a sp1=([9]=x)\ndeclare -a sp1=([0]=D [2]=C [6]=B [9]=A)'
E           Actual stdout:   'declare -a sp1=([9]=x)\ndeclare -a sp1=([-10]=D [-8]=C [-4]=B [-1]=A [9]=x)\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp1[9]=x
E           typeset -p sp1 | sed 's/"//g'
E           
E           sp1[-1]=A
E           sp1[-4]=B
E           sp1[-8]=C
E           sp1[-10]=D
E           typeset -p sp1 | sed 's/"//g'
E           ---
E           
E           ---
E           
E           Test: a[i]=v with BigInt (line 209)
E           
E           stdout mismatch:
E             expected: '3\n4\n5\n6'
E             actual:   '3\n3\n3\n3'
E           
E           Expected stdout: '3\n4\n5\n6'
E           Actual stdout:   '3\n3\n3\n3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh) exit ;; esac
E           
E           sp1[1]=x
E           sp1[5]=y
E           sp1[9]=z
E           
E           echo "${#sp1[@]}"
E           sp1[0x7FFFFFFFFFFFFFFF]=a
E           echo "${#sp1[@]}"
E           sp1[0x7FFFFFFFFFFFFFFE]=b
E           echo "${#sp1[@]}"
E           sp1[0x7FFFFFFFFFFFFFFD]=c
E           echo "${#sp1[@]}"
E           ---
E           
E           ... and 21 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[array.test.sh] ______________________

test_file = 'array.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 30 passed, 45 failed, 2 skipped
E           ============================================================
E           
E           Test: local array (line 19)
E           
E           stdout mismatch:
E             expected: "['1']"
E             actual:   "['']"
E           
E           Expected stdout: "['1']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # mksh support local variables, but not local arrays, oddly.
E           f() {
E             local a=(1 '2 3')
E             argv.py "${a[0]}"
E           }
E           f
E           ---
E           
E           ---
E           
E           Test: Command with with word splitting in array (line 31)
E           
E           stdout mismatch:
E             expected: "['1 2', '3', '4']"
E             actual:   "['1 2', '3 4']"
E           
E           Expected stdout: "['1 2', '3', '4']"
E           Actual stdout:   "['1 2', '3 4']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           array=('1 2' $(echo '3 4'))
E           argv.py "${array[@]}"
E           ---
E           
E           ---
E           
E           Test: space before ( in array initialization (line 36)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: None
E           Actual stdout:   '1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: mksh accepts this, but bash doesn't
E           a= (1 '2 3')
E           echo $a
E           ---
E           
E           ... and 42 more failures

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[assign-deferred.test.sh] _________________

test_file = 'assign-deferred.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 4 passed, 4 failed, 1 skipped
E           ============================================================
E           
E           Test: typeset -a a[1]=a a[3]=c (line 16)
E           
E           stdout mismatch:
E             expected: "['x', 'z']"
E             actual:   '[]'
E           
E           Expected stdout: "['x', 'z']"
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # declare works the same way in bash, but not mksh.
E           # spaces are NOT allowed here.
E           typeset -a a[1*1]=x a[1+2]=z
E           argv.py "${a[@]}"
E           ---
E           
E           ---
E           
E           Test: local a[3]=4 (line 23)
E           
E           stdout mismatch:
E             expected: "status=0\n['3', '5', '4', '6']"
E             actual:   'status=1\n[]'
E           
E           Expected stdout: "status=0\n['3', '5', '4', '6']"
E           Actual stdout:   'status=1\n[]\n'
E           Expected stderr: None
E           Actual stderr:   "bash: local: 'a[3]': not a valid identifier\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             local a[3]=4 a[5]=6
E             echo status=$?
E             argv.py "${!a[@]}" "${a[@]}"
E           }
E           f
E           ---
E           
E           ---
E           
E           Test: is 'builtin' prefix and array allowed?  OSH is smarter (line 95)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'len=0'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'len=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           builtin typeset a=(1 2 3)
E           echo len=${#a[@]}
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[assign-dialects.test.sh] _________________

test_file = 'assign-dialects.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 2 passed, 2 failed, 0 skipped
E           ============================================================
E           
E           Test: test -v with arrays (line 39)
E           
E           stdout mismatch:
E             expected: 'a=1\na[0]=1\n\na=0\na[0]=0\n\na[1]=1\na[x]=0'
E             actual:   'a=1\na[0]=1\n\na=1\na[0]=0\n\na[1]=1\na[x]=1'
E           
E           Expected stdout: 'a=1\na[0]=1\n\na=0\na[0]=0\n\na[1]=1\na[x]=0\n'
E           Actual stdout:   'a=1\na[0]=1\n\na=1\na[0]=0\n\na[1]=1\na[x]=1\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -a a
E           
E           test -v a
E           echo a=$?
E           test -v 'a[0]'
E           echo "a[0]=$?"
E           echo
E           
E           a[0]=1
E           
E           test -v a
E           echo a=$?
E           test -v 'a[0]'
E           echo "a[0]=$?"
E           echo
E           
E           test -v 'a[1]'
E           echo "a[1]=$?"
E           
E           # stupid rule about undefined 'x'
E           test -v 'a[x]'
E           echo "a[x]=$?"
E           echo
E           ---
E           
E           ---
E           
E           Test: test -v with assoc arrays (line 89)
E           
E           stdout mismatch:
E             expected: 'A=1\nA[0]=1\n\nA=0\nA[0]=0\n\nA[1]=1\nA[x]=1'
E             actual:   'A=1\nA[0]=1\n\nA=1\nA[0]=1\n\nA[1]=1\nA[x]=1'
E           
E           Expected stdout: 'A=1\nA[0]=1\n\nA=0\nA[0]=0\n\nA[1]=1\nA[x]=1\n'
E           Actual stdout:   'A=1\nA[0]=1\n\nA=1\nA[0]=1\n\nA[1]=1\nA[x]=1\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           typeset -A A
E           
E           test -v A
E           echo A=$?
E           test -v 'A[0]'
E           echo "A[0]=$?"
E           echo
E           
E           A['0']=x
E           
E           test -v A
E           echo A=$?
E           test -v 'A[0]'
E           echo "A[0]=$?"
E           echo
E           
E           test -v 'A[1]'
E           echo "A[1]=$?"
E           
E           # stupid rule about undefined 'x'
E           test -v 'A[x]'
E           echo "A[x]=$?"
E           echo
E           ---

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[assign-extended.test.sh] _________________

test_file = 'assign-extended.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 13 passed, 20 failed, 4 skipped
E           ============================================================
E           
E           Test: declare (line 97)
E           
E           stdout mismatch:
E             expected: '[declare]\ntest_var1=111\ntest_var2=222\ntest_var3=333\ntest_var4=test_var1\ntest_var5=555\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x test_var3="333"\n[local]\ntest_var5=555'
E             actual:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x __readonly__="test_var2"\ndeclare -x test_var1="111"\ndeclare -x test_var2="222"\ndeclare -x test_var3="333"\ndeclare -x test_var5="555"\n[local]'
E           
E           Expected stdout: '[declare]\ntest_var1=111\ntest_var2=222\ntest_var3=333\ntest_var4=test_var1\ntest_var5=555\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x test_var3="333"\n[local]\ntest_var5=555'
E           Actual stdout:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x __readonly__="test_var2"\ndeclare -x test_var1="111"\ndeclare -x test_var2="222"\ndeclare -x test_var3="333"\ndeclare -x test_var5="555"\n[local]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           test_var1=111
E           readonly test_var2=222
E           export test_var3=333
E           declare -n test_var4=test_var1
E           f1() {
E             local test_var5=555
E             {
E               echo '[declare]'
E               declare
E               echo '[readonly]'
E               readonly
E               echo '[export]'
E               export
E               echo '[local]'
E               local
E             } | grep -E '^\[|^\b.*test_var.\b'
E           }
E           f1
E           ---
E           
E           ---
E           
E           Test: declare -p (line 157)
E           
E           stdout mismatch:
E             expected: '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -x test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x test_var3="333"\n[local]\ntest_var5=555'
E             actual:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\n[local]'
E           
E           Expected stdout: '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -x test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\ndeclare -x test_var3="333"\n[local]\ntest_var5=555'
E           Actual stdout:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\n[local]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # BUG: bash doesn't output flags with "local -p", which seems to contradict
E           #   with manual.
E           test_var1=111
E           readonly test_var2=222
E           export test_var3=333
E           declare -n test_var4=test_var1
E           f1() {
E             local test_var5=555
E             {
E               echo '[declare]'
E               declare -p
E               echo '[readonly]'
E               readonly -p
E               echo '[export]'
E               export -p
E               echo '[local]'
E               local -p
E             } | grep -E '^\[|^\b.*test_var.\b'
E           }
E           f1
E           ---
E           
E           ---
E           
E           Test: declare -p var (line 264)
E           
E           stdout mismatch:
E             expected: '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -x test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\n[export]\n[local]'
E             actual:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\n[local]'
E           
E           Expected stdout: '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -x test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\n[export]\n[local]'
E           Actual stdout:   '[declare]\ndeclare -- test_var1="111"\ndeclare -r test_var2="222"\ndeclare -- test_var3="333"\ndeclare -n test_var4="test_var1"\ndeclare -- test_var5="555"\n[readonly]\ndeclare -r test_var2="222"\n[export]\n[local]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # BUG? bash doesn't output anything for 'local/readonly -p var', which seems to
E           #   contradict with manual.  Besides, 'export -p var' is not described in
E           #   manual
E           test_var1=111
E           readonly test_var2=222
E           export test_var3=333
E           declare -n test_var4=test_var1
E           f1() {
E             local test_var5=555
E             {
E               echo '[declare]'
E               declare -p test_var{0..5}
E               echo '[readonly]'
E               readonly -p test_var{0..5}
E               echo '[export]'
E               export -p test_var{0..5}
E               echo '[local]'
E               local -p test_var{0..5}
E             } | grep -E '^\[|^\b.*test_var.\b'
E           }
E           f1
E           ---
E           
E           ... and 17 more failures

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[assign.test.sh] ______________________

test_file = 'assign.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 26 passed, 20 failed, 1 skipped
E           ============================================================
E           
E           Test: Escaped = in command name (line 113)
E           
E           stdout mismatch:
E             expected: 'HI'
E             actual:   ''
E           
E           Expected stdout: 'HI'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # foo=bar is in the 'spec/bin' dir.
E           foo\=bar
E           ---
E           
E           ---
E           
E           Test: Env binding not allowed before compound command (line 119)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 4, column 23
E           
E           
E           Script:
E           ---
E           # bash gives exit code 2 for syntax error, because of 'do'.
E           # dash gives 0 because there is stuff after for?  Should really give an error.
E           # mksh gives acceptable error of 1.
E           FOO=bar for i in a b; do printenv.py $FOO; done
E           ---
E           
E           ---
E           
E           Test: Trying to run keyword 'for' (line 127)
E           
E           status mismatch: expected 127, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 127
E           Actual status:   0
E           
E           Script:
E           ---
E           FOO=bar for
E           ---
E           
E           ... and 17 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[ble-features.test.sh] ___________________

test_file = 'ble-features.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 2 passed, 7 failed, 0 skipped
E           ============================================================
E           
E           Test: [bash_unset] local-unset / dynamic-unset for localvar (line 3)
E           
E           Execution error: Expected ')' after '(' in function definition at line 24, column 16
E           
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f1() {
E             local v=local
E             unset v
E             echo "[$1,local,(unset)] v: ${v-(unset)}"
E           }
E           v=global
E           f1 global
E           
E           f1() {
E             local v=local
E             unlocal v
E             echo "[$1,local,(unlocal)] v: ${v-(unset)}"
E           }
E           v=global
E           f1 'global'
E           
E           
E           
E           # always-value-unset
E           #   local-unset   = value-unset
E           #   dynamic-unset = value-unset
E           [global,local,(unset)] v: (unset)
E           [global,local,(unlocal)] v: (unset)
E           ---
E           
E           ---
E           
E           Test: [bash_unset] local-unset / dynamic-unset for localvar (mutated from tempenv) (line 47)
E           
E           Execution error: Expected ')' after '(' in function definition at line 28, column 24
E           
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f1() {
E             local v=local
E             unset v
E             echo "[$1,local,(unset)] v: ${v-(unset)}"
E           }
E           v=global
E           v=tempenv f1 'global,tempenv'
E           
E           f1() {
E             local v=local
E             unlocal v
E             echo "[$1,local,(unlocal)] v: ${v-(unset)}"
E           }
E           v=global
E           v=tempenv f1 'global,tempenv'
E           
E           
E           # Note on bug in bash 4.3 to bash 5.0
E           # [global,tempenv,local,(unset)] v: global
E           # [global,tempenv,local,(unlocal)] v: global
E           
E           
E           # always-value-unset
E           #   local-unset   = value-unset
E           #   dynamic-unset = value-unset
E           [global,tempenv,local,(unset)] v: (unset)
E           [global,tempenv,local,(unlocal)] v: (unset)
E           ---
E           
E           ---
E           
E           Test: [bash_unset] local-unset / dynamic-unset for tempenv (line 95)
E           
E           stdout mismatch:
E             expected: '# always-cell-unset, bash-unset\n#   local-unset   = cell-unset\n#   dynamic-unset = cell-unset\n[global,tempenv,(unset)] v: global\n[global,tempenv,(unlocal)] v: global'
E             actual:   '[global,tempenv,(unset)] v: (unset)\n[global,tempenv,(unlocal)] v: (unset)'
E           
E           Expected stdout: '# always-cell-unset, bash-unset\n#   local-unset   = cell-unset\n#   dynamic-unset = cell-unset\n[global,tempenv,(unset)] v: global\n[global,tempenv,(unlocal)] v: global'
E           Actual stdout:   '[global,tempenv,(unset)] v: (unset)\n[global,tempenv,(unlocal)] v: (unset)\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f1() {
E             unset v
E             echo "[$1,(unset)] v: ${v-(unset)}"
E           }
E           v=global
E           v=tempenv f1 'global,tempenv'
E           
E           f1() {
E             unlocal v
E             echo "[$1,(unlocal)] v: ${v-(unset)}"
E           }
E           v=global
E           v=tempenv f1 'global,tempenv'
E           ---
E           
E           ... and 4 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[ble-unset.test.sh] ____________________

test_file = 'ble-unset.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 5 failed, 0 skipped
E           ============================================================
E           
E           Test: [bash_unset] nested context by tempenv-eval (line 7)
E           
E           stdout mismatch:
E             expected: '# localvar-nest yes\n[global,tempenv1,local1] v: local1\n[global,tempenv1,local1,tempenv2,(eval)] v: tempenv2\n[global,tempenv1,local1,tempenv2,(eval),local2] v: local2\n[global,tempenv1,local1] v: local1 (after)'
E             actual:   '[global,tempenv1,local1] v: local1\n[global,tempenv1,local1,tempenv2,(eval)] v: tempenv2\n[global,tempenv1,local1,tempenv2,(eval),local2] v: local2\n[global,tempenv1,local1] v: local1 (after)'
E           
E           Expected stdout: '# localvar-nest yes\n[global,tempenv1,local1] v: local1\n[global,tempenv1,local1,tempenv2,(eval)] v: tempenv2\n[global,tempenv1,local1,tempenv2,(eval),local2] v: local2\n[global,tempenv1,local1] v: local1 (after)'
E           Actual stdout:   '[global,tempenv1,local1] v: local1\n[global,tempenv1,local1,tempenv2,(eval)] v: tempenv2\n[global,tempenv1,local1,tempenv2,(eval),local2] v: local2\n[global,tempenv1,local1] v: local1 (after)\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f1() {
E             local v=local1
E             echo "[$1,local1] v: ${v-(unset)}"
E             v=tempenv2 eval '
E               echo "[$1,local1,tempenv2,(eval)] v: ${v-(unset)}"
E               local v=local2
E               echo "[$1,local1,tempenv2,(eval),local2] v: ${v-(unset)}"
E             '
E             echo "[$1,local1] v: ${v-(unset)} (after)"
E           }
E           v=global
E           v=tempenv1 f1 global,tempenv1
E           ---
E           
E           ---
E           
E           Test: [bash_unset] local-unset / dynamic-unset for localvar on nested-context (line 37)
E           
E           Execution error: Expected ')' after '(' in function definition at line 16, column 35
E           
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f2() {
E             local v=local1
E             v=tempenv2 eval '
E               local v=local2
E               (unset v  ; echo "[$1,local1,tempenv2,(eval),local2,(unset)] v: ${v-(unset)}")
E               (unlocal v; echo "[$1,local1,tempenv2,(eval),local2,(unlocal)] v: ${v-(unset)}")
E             '
E           }
E           v=global
E           v=tempenv1 f2 global,tempenv1
E           
E           
E           # Note that bash-4.3 to bash 5.0 behave differently
E           # [global,tempenv1,local1,tempenv2,(eval),local2,(unset)] v: local1
E           # [global,tempenv1,local1,tempenv2,(eval),local2,(unlocal)] v: local1
E           
E           # always-value-unset
E           [global,tempenv1,local1,tempenv2,(eval),local2,(unset)] v: (unset)
E           [global,tempenv1,local1,tempenv2,(eval),local2,(unlocal)] v: (unset)
E           ---
E           
E           ---
E           
E           Test: [bash_unset] dynamic-unset for nested localvars (line 73)
E           
E           stdout mismatch:
E             expected: '# cell-unset x localvar-tempenv-share x tempenv-in-localctx\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local3\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local2 (unlocal 1)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local1 (unlocal 2)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: global (unlocal 3)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)'
E             actual:   '[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 1)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 2)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 3)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 4)'
E           
E           Expected stdout: '# cell-unset x localvar-tempenv-share x tempenv-in-localctx\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local3\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local2 (unlocal 1)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local1 (unlocal 2)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: global (unlocal 3)\n[global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)'
E           Actual stdout:   '[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 1)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 2)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 3)\n[\\global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: \\tempenv3 (unlocal 4)\n'
E           Expected stderr: None
E           Actual stderr:   'bash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\nbash: [global,tempenv1/local1,tempenv2/local2,tempenv3/local3]: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           unlocal() { unset -v "$1"; }
E           
E           f3() {
E             local v=local1
E             v=tempenv2 eval '
E               local v=local2
E               v=tempenv3 eval "
E                 local v=local3
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)}\"
E                 unlocal v
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)} (unlocal 1)\"
E                 unlocal v
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)} (unlocal 2)\"
E                 unlocal v
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)} (unlocal 3)\"
E                 unlocal v
E                 echo \"[\$1/local1,tempenv2/local2,tempenv3/local3] v: \${v-(unset)} (unlocal 4)\"
E               "
E             '
E           }
E           v=global
E           v=tempenv1 f3 global,tempenv1
E           
E           
E           # value-unset
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local3
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 1)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 2)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 3)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)
E           
E           
E           # cell-unset (remove all localvar/tempenv) x tempenv-value-unset
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: local3
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: tempenv1 (unlocal 1)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 2)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 3)
E           [global,tempenv1/local1,tempenv2/local2,tempenv3/local3] v: (unset) (unlocal 4)
E           ---
E           
E           ... and 2 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[bool-parse.test.sh] ____________________

test_file = 'bool-parse.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 5 passed, 3 failed, 0 skipped
E           ============================================================
E           
E           Test: test builtin: ( = ) is confusing: equality test or non-empty string test (line 44)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=0'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # here it's equality
E           test '(' = ')'
E           echo status=$?
E           
E           # here it's like -n =
E           test 0 -eq 0 -a '(' = ')'
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: test builtin: ( == ) is confusing: equality test or non-empty string test (line 64)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=0'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # here it's equality
E           test '(' == ')'
E           echo status=$?
E           
E           # here it's like -n ==
E           test 0 -eq 0 -a '(' == ')'
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: Not allowed: [[ ) ]] and [[ ( ]] (line 110)
E           
E           Execution error: Expected conditional expression at line 1, column 4
E           
E           
E           Script:
E           ---
E           [[ ) ]]
E           echo status=$?
E           [[ ( ]]
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[brace-expansion.test.sh] _________________

test_file = 'brace-expansion.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 39 passed, 16 failed, 0 skipped
E           ============================================================
E           
E           Test: partial leading expansion (line 13)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 6
E           
E           
E           Script:
E           ---
E           echo }_{a,b}
E           ---
E           
E           ---
E           
E           Test: double expansion with single and double quotes (line 44)
E           
E           stdout mismatch:
E             expected: 'a_c a_d b_c b_d'
E             actual:   '{a,b}_{c,d}'
E           
E           Expected stdout: 'a_c a_d b_c b_d'
E           Actual stdout:   '{a,b}_{c,d}\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo {'a',b}_{c,"d"}
E           ---
E           
E           ---
E           
E           Test: expansion with mixed quotes (line 48)
E           
E           stdout mismatch:
E             expected: '-Xb- -cd-'
E             actual:   '-{Xb,cd}-'
E           
E           Expected stdout: '-Xb- -cd-'
E           Actual stdout:   '-{Xb,cd}-\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo -{\X"b",'cd'}-
E           ---
E           
E           ... and 13 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[bugs.test.sh] _______________________

test_file = 'bugs.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 11 passed, 17 failed, 0 skipped
E           ============================================================
E           
E           Test: assign readonly -- one line (line 25)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'hi'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'hi\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           readonly x=1; x=2; echo hi
E           ---
E           
E           ---
E           
E           Test: assign readonly -- multiple lines -- set -o posix (line 45)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'hi'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'hi\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: posix: invalid option name\n'
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o posix
E           readonly x=1
E           x=2
E           echo hi
E           ---
E           
E           ---
E           
E           Test: First word like foo$x() and foo$[1+2] (regression) (line 74)
E           
E           Execution error: Expected ')' after '(' in function definition at line 2, column 14
E           
E           
E           Script:
E           ---
E           # Problem: $x() func call broke this error message
E           foo$identity('z')
E           
E           foo$[1+2]
E           
E           echo DONE
E           ---
E           
E           ... and 14 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[builtin-bash.test.sh] ___________________

test_file = 'builtin-bash.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 5 passed, 8 failed, 0 skipped
E           ============================================================
E           
E           Test: bad help topic (line 18)
E           
E           stdout mismatch:
E             expected: 'help=1\ngrep=0'
E             actual:   'help=127\ngrep=0'
E           
E           Expected stdout: 'help=1\ngrep=0'
E           Actual stdout:   'help=127\ngrep=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           help ZZZ 2>$TMP/err.txt
E           echo "help=$?"
E           cat $TMP/err.txt | grep -i 'no help topics' >/dev/null
E           echo "grep=$?"
E           ---
E           
E           ---
E           
E           Test: mapfile (line 28)
E           
E           stdout mismatch:
E             expected: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E             actual:   'n=3\n[1]\n[3]\n[5]'
E           
E           Expected stdout: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E           Actual stdout:   'n=3\n[1]\n[3]\n[5]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type mapfile >/dev/null 2>&1 || exit 0
E           printf '%s\n' {1..5..2} | {
E             mapfile
E             echo "n=${#MAPFILE[@]}"
E             printf '[%s]\n' "${MAPFILE[@]}"
E           }
E           ---
E           
E           ---
E           
E           Test: readarray (synonym for mapfile) (line 47)
E           
E           stdout mismatch:
E             expected: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E             actual:   'n=3\n[1]\n[3]\n[5]'
E           
E           Expected stdout: 'n=3\n[1\n]\n[3\n]\n[5\n]'
E           Actual stdout:   'n=3\n[1]\n[3]\n[5]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type readarray >/dev/null 2>&1 || exit 0
E           printf '%s\n' {1..5..2} | {
E             readarray
E             echo "n=${#MAPFILE[@]}"
E             printf '[%s]\n' "${MAPFILE[@]}"
E           }
E           ---
E           
E           ... and 5 more failures

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[builtin-bracket.test.sh] _________________

test_file = 'builtin-bracket.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 41 passed, 11 failed, 0 skipped
E           ============================================================
E           
E           Test: four args (line 82)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   "bash: [: missing `]'\nbash: foo: command not found\n"
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           [ ! foo = foo ]
E           echo status=$?
E           [ \( -z foo \) ]
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: ( ) ! -a -o with system version of [ (line 144)
E           
E           Execution error: Expected ')' to close subshell at line 1, column 13
E           
E           
E           Script:
E           ---
E           command [ --version
E           command [ -z '' -a '(' ! -z x ')' ] && echo true
E           ---
E           
E           ---
E           
E           Test: -x (line 244)
E           
E           stdout mismatch:
E             expected: 'no\nyes\nbad'
E             actual:   'yes\nbad'
E           
E           Expected stdout: 'no\nyes\nbad'
E           Actual stdout:   'yes\nbad\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f $TMP/x
E           echo 'echo hi' > $TMP/x
E           test -x $TMP/x || echo 'no'
E           chmod +x $TMP/x
E           test -x $TMP/x && echo 'yes'
E           test -x $TMP/__nonexistent__ || echo 'bad'
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[builtin-cd.test.sh] ____________________

test_file = 'builtin-cd.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 17 passed, 11 failed, 2 skipped
E           ============================================================
E           
E           Test: cd BAD/.. (line 10)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Odd divergence in shells: dash and mksh normalize the path and don't check
E           # this error.
E           # TODO: I would like OSH to behave like bash and zsh, but separating chdir_arg
E           # and pwd_arg breaks case 17.
E           
E           cd nonexistent_ZZ/..
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: cd with 2 or more args - with strict_arg_parse (line 26)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=0\nfailed with multiple args'
E             actual:   'status=0\nstatus=0'
E           
E           Expected stdout: 'status=0\nstatus=0\nfailed with multiple args'
E           Actual stdout:   'status=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s strict_arg_parse
E           
E           mkdir -p foo
E           cd foo
E           echo status=$?
E           cd ..
E           echo status=$?
E           
E           
E           cd foo bar
E           st=$?
E           if test $st -ne 0; then
E             echo 'failed with multiple args'
E           fi
E           ---
E           
E           ---
E           
E           Test: cd - without OLDPWD (line 64)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd - > /dev/null  # silence dash output
E           echo status=$?
E           #pwd
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
_______________ test_bash_spec_file[builtin-completion.test.sh] ________________

test_file = 'builtin-completion.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 8 passed, 40 failed, 3 skipped
E           ============================================================
E           
E           Test: complete with no args and complete -p both print completion spec (line 4)
E           
E           stdout mismatch:
E             expected: "complete -W 'foo bar' mycommand\ncomplete -W 'foo bar' mycommand\ncomplete -F myfunc other"
E             actual:   ''
E           
E           Expected stdout: "complete -W 'foo bar' mycommand\ncomplete -W 'foo bar' mycommand\ncomplete -F myfunc other"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: complete: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -e
E           
E           complete
E           
E           complete -W 'foo bar' mycommand
E           
E           complete -p
E           
E           complete -F myfunc other
E           
E           complete
E           ---
E           
E           ---
E           
E           Test: complete -F f is usage error (line 23)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=2'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=2'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: complete: command not found\nbash: complete: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           #complete -F f cmd
E           
E           # Alias for complete -p
E           complete > /dev/null  # ignore OSH output for now
E           echo status=$?
E           
E           # But this is an error
E           complete -F f
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: complete with nonexistent function (line 39)
E           
E           stdout mismatch:
E             expected: 'status=0'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=0'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: complete: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           complete -F invalidZZ -D
E           echo status=$?
E           ---
E           
E           ... and 37 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[builtin-dirs.test.sh] ___________________

test_file = 'builtin-dirs.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 3 passed, 15 failed, 0 skipped
E           ============================================================
E           
E           Test: pushd/popd (line 5)
E           
E           stdout mismatch:
E             expected: '~ /\npwd=/tmp\n/\npwd=/'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: '~ /\npwd=/tmp\n/\npwd=/'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           cd /
E           pushd /tmp
E           echo -n pwd=; pwd
E           popd
E           echo -n pwd=; pwd
E           ---
E           
E           ---
E           
E           Test: pushd usage (line 26)
E           
E           stdout mismatch:
E             expected: 'status=2\nstatus=0\nstatus=0'
E             actual:   'status=1\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=2\nstatus=0\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: pushd: command not found\nbash: pushd: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           pushd -z
E           echo status=$?
E           pushd /tmp >/dev/null
E           echo status=$?
E           pushd -- /tmp >/dev/null
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: popd usage error (line 44)
E           
E           stdout mismatch:
E             expected: 'status=2\nstatus=0\nstatus=2'
E             actual:   'status=1\nstatus=1\nstatus=1'
E           
E           Expected stdout: 'status=2\nstatus=0\nstatus=2'
E           Actual stdout:   'status=1\nstatus=1\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: pushd: command not found\nbash: popd: command not found\nbash: popd: command not found\nbash: popd: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           pushd / >/dev/null
E           popd zzz
E           echo status=$?
E           
E           popd -- >/dev/null
E           echo status=$?
E           
E           popd -z
E           echo status=$?
E           ---
E           
E           ... and 12 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[builtin-echo.test.sh] ___________________

test_file = 'builtin-echo.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 13 passed, 12 failed, 0 skipped
E           ============================================================
E           
E           Test: echo -e with 4 digit unicode escape (line 175)
E           
E           stdout mismatch:
E             expected: 'abcdef'
E             actual:   'abcd\\u0065f'
E           
E           Expected stdout: 'abcdef'
E           Actual stdout:   'abcd\\u0065f\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           flags='-e'
E           case $SH in dash) flags='' ;; esac
E           
E           echo $flags 'abcd\u0065f'
E           ---
E           
E           ---
E           
E           Test: echo -e with 8 digit unicode escape (line 187)
E           
E           stdout mismatch:
E             expected: 'abcdef'
E             actual:   'abcd\\U00000065f'
E           
E           Expected stdout: 'abcdef'
E           Actual stdout:   'abcd\\U00000065f\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           flags='-e'
E           case $SH in dash) flags='' ;; esac
E           
E           echo $flags 'abcd\U00000065f'
E           ---
E           
E           ---
E           
E           Test: \0377 is the highest octal byte (line 199)
E           
E           stdout mismatch:
E             expected: ' ff 37'
E             actual:   '  c3  bf  37'
E           
E           Expected stdout: ' ff 37'
E           Actual stdout:   '  c3  bf  37\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo -en '\03777' | od -A n -t x1 | sed 's/ \+/ /g'
E           ---
E           
E           ... and 9 more failures

tests/spec_tests/test_spec.py:249: Failed
_______________ test_bash_spec_file[builtin-eval-source.test.sh] _______________

test_file = 'builtin-eval-source.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 13 passed, 9 failed, 0 skipped
E           ============================================================
E           
E           Test: eval accepts/ignores -- (line 9)
E           
E           stdout mismatch:
E             expected: 'hi'
E             actual:   ''
E           
E           Expected stdout: 'hi'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: --: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           eval -- echo hi
E           ---
E           
E           ---
E           
E           Test: eval usage (line 17)
E           
E           stdout mismatch:
E             expected: '127\n2'
E             actual:   '1\n1'
E           
E           Expected stdout: '127\n2'
E           Actual stdout:   '1\n1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: -: command not found\nbash: -z: command not found\nbash: 127: command not found\nbash: 0: command not found\nbash: 127: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           eval -
E           echo $?
E           eval -z
E           echo $?
E           127
E           0
E           127
E           ---
E           
E           ---
E           
E           Test: eval string with 'break continue return error' (line 39)
E           
E           stdout mismatch:
E             expected: '--- break\n1\nend func\n--- continue\n1\n2\nend func\n--- return\n1\n--- false\n1'
E             actual:   ''
E           
E           Expected stdout: '--- break\n1\nend func\n--- continue\n1\n2\nend func\n--- return\n1\n--- false\n1'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: eval: break 1\n'
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           set -e
E           
E           sh_func_that_evals() {
E             local code_str=$1
E             for i in 1 2; do
E               echo $i
E               eval "$code_str"
E             done
E             echo 'end func'
E           }
E           
E           for code_str in break continue return false; do
E             echo "--- $code_str"
E             sh_func_that_evals "$code_str"
E           done
E           echo status=$?
E           
E           
E           
E           # #### eval YSH block with 'break continue return error'
E           # case $SH in dash|bash*|mksh|zsh) exit ;; esac
E           #
E           # shopt -s ysh:all
E           #
E           # proc proc_that_evals(; ; ;b) {
E           #   for i in 1 2; do
E           #     echo $i
E           #     call io->eval(b)
E           #   done
E           #   echo 'end func'
E           # }
E           #
E           # var cases = [
E           #   ['break', ^(break)],
E           #   ['continue', ^(continue)],
E           #   ['return', ^(return)],
E           #   ['false', ^(false)],
E           # ]
E           #
E           # for test_case in (cases) {
E           #   var code_str, block = test_case
E           #   echo "--- $code_str"
E           #   proc_that_evals (; ; block)
E           # }
E           # echo status=$?
E           #
E           # ## status: 1
E           # ## STDOUT:
E           # --- break
E           # 1
E           # end func
E           # --- continue
E           # 1
E           # 2
E           # end func
E           # --- return
E           # 1
E           # --- false
E           # 1
E           # ## END
E           #
E           # ## N-I dash/bash/mksh/zsh status: 0
E           # ## N-I dash/bash/mksh/zsh STDOUT:
E           # ## END
E           ---
E           
E           ... and 6 more failures

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[builtin-getopts.test.sh] _________________

test_file = 'builtin-getopts.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 15 passed, 16 failed, 0 skipped
E           ============================================================
E           
E           Test: getopts sees unknown arg (line 10)
E           
E           stdout mismatch:
E             expected: 'status=0 opt=? OPTARG='
E             actual:   'status=0 opt=? OPTARG=Z'
E           
E           Expected stdout: 'status=0 opt=? OPTARG='
E           Actual stdout:   'status=0 opt=? OPTARG=Z\n'
E           Expected stderr: None
E           Actual stderr:   'bash: getopts: illegal option -- Z\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- -Z
E           getopts 'a:' opt
E           echo "status=$? opt=$opt OPTARG=$OPTARG"
E           ---
E           
E           ---
E           
E           Test: getopts with invalid variable name (line 74)
E           
E           stdout mismatch:
E             expected: 'status=1 opt= OPTARG=foo OPTIND=3'
E             actual:   'status=0 opt= OPTARG=foo OPTIND=3'
E           
E           Expected stdout: 'status=1 opt= OPTARG=foo OPTIND=3'
E           Actual stdout:   'status=0 opt= OPTARG=foo OPTIND=3\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- -c foo -h
E           getopts 'hc:' opt-
E           echo status=$? opt=$opt OPTARG=$OPTARG OPTIND=$OPTIND
E           ---
E           
E           ---
E           
E           Test: getopts with invalid flag (line 82)
E           
E           stdout mismatch:
E             expected: 'ERROR 3'
E             actual:   ''
E           
E           Expected stdout: 'ERROR 3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: getopts: illegal option -- x\n'
E           Expected status: 2
E           Actual status:   2
E           
E           Script:
E           ---
E           set -- -h -x
E           while getopts "hc:" opt; do
E             case $opt in
E               h) FLAG_h=1 ;;
E               c) FLAG_c="$OPTARG" ;;
E               '?') echo ERROR $OPTIND; exit 2; ;;
E             esac
E           done
E           echo status=$?
E           ---
E           
E           ... and 13 more failures

tests/spec_tests/test_spec.py:249: Failed
_______________ test_bash_spec_file[builtin-meta-assign.test.sh] _______________

test_file = 'builtin-meta-assign.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 8 passed, 3 failed, 0 skipped
E           ============================================================
E           
E           Test: builtin declare a=(x y) is allowed (line 4)
E           
E           stdout mismatch:
E             expected: 'declare -a a=([0]="x" [1]="y")\nfail\nfail'
E             actual:   'declare -- a="(x y)"\ndeclare -- a="(x y)"\ndeclare -a a=([0]="x" [1]="y")'
E           
E           Expected stdout: 'declare -a a=([0]="x" [1]="y")\nfail\nfail'
E           Actual stdout:   'declare -- a="(x y)"\ndeclare -- a="(x y)"\ndeclare -a a=([0]="x" [1]="y")\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh|mksh|ash) exit ;; esac
E           
E           $SH -c 'declare a=(x y); declare -p a'
E           if test $? -ne 0; then
E             echo 'fail'
E           fi
E           
E           $SH -c 'builtin declare a=(x y); declare -p a'
E           if test $? -ne 0; then
E             echo 'fail'
E           fi
E           
E           $SH -c 'builtin declare -a a=(x y); declare -p a'
E           if test $? -ne 0; then
E             echo 'fail'
E           fi
E           ---
E           
E           ---
E           
E           Test: export, builtin export (line 89)
E           
E           stdout mismatch:
E             expected: 'a b\na'
E             actual:   'a\na'
E           
E           Expected stdout: 'a b\na'
E           Actual stdout:   'a\na\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='a b'
E           
E           export y=$x
E           echo $y
E           
E           builtin export z=$x
E           echo $z
E           ---
E           
E           ---
E           
E           Test: \command readonly - similar issue (line 160)
E           
E           stdout mismatch:
E             expected: 'a b\na\na\na'
E             actual:   'a\na\na\na'
E           
E           Expected stdout: 'a b\na\na\na'
E           Actual stdout:   'a\na\na\na\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in zsh) exit ;; esac
E           
E           # \command readonly is equivalent to \builtin declare
E           # except dash implements it
E           
E           x='a b'
E           
E           readonly b=$x
E           echo $b
E           
E           command readonly c=$x
E           echo $c
E           
E           \command readonly d=$x
E           echo $d
E           
E           'command' readonly e=$x
E           echo $e
E           
E           # The issue here is that we have a heuristic in EvalWordSequence2:
E           # fs len(part_vals) == 1
E           
E           
E           
E           # note: later versions of dash are fixed
E           ---

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[builtin-meta.test.sh] ___________________

test_file = 'builtin-meta.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 10 passed, 8 failed, 0 skipped
E           ============================================================
E           
E           Test: command -v (line 3)
E           
E           stdout mismatch:
E             expected: 'echo\n0\nmyfunc\n0\nnonexistent=1\nempty=1\nfor\n0'
E             actual:   'echo\n0\nmyfunc\n0\nnonexistent=1\nempty=1\n1'
E           
E           Expected stdout: 'echo\n0\nmyfunc\n0\nnonexistent=1\nempty=1\nfor\n0'
E           Actual stdout:   'echo\n0\nmyfunc\n0\nnonexistent=1\nempty=1\n1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: command: nonexistent: not found\nbash: command: : not found\nbash: command: for: not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           myfunc() { echo x; }
E           command -v echo
E           echo $?
E           
E           command -v myfunc
E           echo $?
E           
E           command -v nonexistent  # doesn't print anything
E           echo nonexistent=$?
E           
E           command -v ''  # BUG FIX, shouldn't succeed
E           echo empty=$?
E           
E           command -v for
E           echo $?
E           ---
E           
E           ---
E           
E           Test: command -v executable, builtin (line 40)
E           
E           stdout mismatch:
E             expected: '/grep\n/ls\n\ntrue\neval'
E             actual:   '\ntrue\neval'
E           
E           Expected stdout: '/grep\n/ls\n\ntrue\neval'
E           Actual stdout:   '\ntrue\neval\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           #command -v grep ls
E           
E           command -v grep | egrep -o '/[^/]+$'
E           command -v ls | egrep -o '/[^/]+$'
E           echo
E           
E           command -v true
E           command -v eval
E           ---
E           
E           ---
E           
E           Test: command -v with multiple names (line 60)
E           
E           stdout mismatch:
E             expected: 'echo\nmyfunc\nfor\nstatus=0'
E             actual:   'echo\nstatus=0'
E           
E           Expected stdout: 'echo\nmyfunc\nfor\nstatus=0'
E           Actual stdout:   'echo\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # ALL FOUR SHELLS behave differently here!
E           #
E           # bash chooses to swallow the error!  We agree with zsh if ANY word lookup
E           # fails, then the whole thing fails.
E           
E           myfunc() { echo x; }
E           command -v echo myfunc ZZZ for
E           echo status=$?
E           ---
E           
E           ... and 5 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[builtin-misc.test.sh] ___________________

test_file = 'builtin-misc.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 4 passed, 3 failed, 0 skipped
E           ============================================================
E           
E           Test: history builtin usage (line 4)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=0\nstatus=2\nstatus=1\nstatus=1'
E             actual:   'status=0\nstatus=0\nstatus=0\nstatus=0\nstatus=0'
E           
E           Expected stdout: 'status=0\nstatus=0\nstatus=2\nstatus=1\nstatus=1'
E           Actual stdout:   'status=0\nstatus=0\nstatus=0\nstatus=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           history
E           echo status=$?
E           history +5  # hm bash considers this valid
E           echo status=$?
E           history -5  # invalid flag
E           echo status=$?
E           history f 
E           echo status=$?
E           history too many args
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: Print shell strings with weird chars: set and printf %q and ${x@Q} (line 47)
E           
E           stdout mismatch:
E             expected: "foo=$'a\\nb\\001c\\'d'\npf  $'a\\nb\\001c\\'d'\n@Q  $'a\\nb\\001c\\'d'"
E             actual:   "foo=''\npf  ''\n@Q  ''"
E           
E           Expected stdout: "foo=$'a\\nb\\001c\\'d'\npf  $'a\\nb\\001c\\'d'\n@Q  $'a\\nb\\001c\\'d'"
E           Actual stdout:   "foo=''\npf  ''\n@Q  ''\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash declare -p will print binary data, which makes this invalid UTF-8!
E           foo=$(/bin/echo -e 'a\nb\xffc'\'d)
E           
E           # let's test the easier \x01, which doesn't give bash problems
E           foo=$(/bin/echo -e 'a\nb\x01c'\'d)
E           
E           # dash:
E           #   only supports 'set'; prints it on multiple lines with binary data
E           #   switches to "'" for single quotes, not \'
E           # zsh:
E           #   print binary data all the time, except for printf %q
E           #   does print $'' strings
E           # mksh:
E           #   prints binary data for @Q
E           #   prints $'' strings
E           
E           # All are very inconsistent.
E           
E           case $SH in dash|mksh|zsh) return ;; esac
E           
E           
E           set | grep '^foo='
E           
E           # Will print multi-line and binary data literally!
E           #declare -p foo
E           
E           printf 'pf  %q\n' "$foo"
E           
E           echo '@Q ' ${foo@Q}
E           ---
E           
E           ---
E           
E           Test: Print shell strings with normal chars: set and printf %q and ${x@Q} (line 88)
E           
E           stdout mismatch:
E             expected: 'foo=spam\ndeclare -- foo="spam"\npf  spam\n@Q  \'spam\''
E             actual:   'foo=\'spam\'\ndeclare -- foo="spam"\npf  spam\n@Q  \'spam\''
E           
E           Expected stdout: 'foo=spam\ndeclare -- foo="spam"\npf  spam\n@Q  \'spam\''
E           Actual stdout:   'foo=\'spam\'\ndeclare -- foo="spam"\npf  spam\n@Q  \'spam\'\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # There are variations on whether quotes are printed
E           
E           case $SH in dash|zsh) return ;; esac
E           
E           foo=spam
E           
E           set | grep '^foo='
E           
E           # Will print multi-line and binary data literally!
E           typeset -p foo
E           
E           printf 'pf  %q\n' "$foo"
E           
E           echo '@Q ' ${foo@Q}
E           ---

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[builtin-printf.test.sh] __________________

test_file = 'builtin-printf.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 29 passed, 34 failed, 0 skipped
E           ============================================================
E           
E           Test: printf -v a[1] (line 60)
E           
E           stdout mismatch:
E             expected: "status=0\n['a', 'foo', 'c']"
E             actual:   "status=0\n['a', 'b', 'c']"
E           
E           Expected stdout: "status=0\n['a', 'foo', 'c']"
E           Actual stdout:   "status=0\n['a', 'b', 'c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           a=(a b c)
E           printf -v 'a[1]' %s 'foo'
E           echo status=$?
E           argv.py "${a[@]}"
E           ---
E           
E           ---
E           
E           Test: printf -v syntax error (line 76)
E           
E           stdout mismatch:
E             expected: 'status=2'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=2'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           printf -v 'a[' %s 'foo'
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: printf -v dynamic scope (line 115)
E           
E           stdout mismatch:
E             expected: 'dollar=dollar\n--\ndollar=\\$\nmylocal=mylocal\n--\ndollar=\\$\nmylocal='
E             actual:   "dollar=dollar\n--\ndollar=$'$'\nmylocal=mylocal\n--\ndollar=$'$'\nmylocal="
E           
E           Expected stdout: 'dollar=dollar\n--\ndollar=\\$\nmylocal=mylocal\n--\ndollar=\\$\nmylocal='
E           Actual stdout:   "dollar=dollar\n--\ndollar=$'$'\nmylocal=mylocal\n--\ndollar=$'$'\nmylocal=\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in mksh|zsh|dash|ash) echo not implemented; exit ;; esac
E           # OK so printf is like assigning to a var.
E           # printf -v foo %q "$bar" is like
E           # foo=${bar@Q}
E           dollar='dollar'
E           f() {
E             local mylocal=foo
E             printf -v dollar %q '$'  # assign foo to a quoted dollar
E             printf -v mylocal %q 'mylocal'
E             echo dollar=$dollar
E             echo mylocal=$mylocal
E           }
E           echo dollar=$dollar
E           echo --
E           f
E           echo --
E           echo dollar=$dollar
E           echo mylocal=$mylocal
E           ---
E           
E           ... and 31 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[builtin-read.test.sh] ___________________

test_file = 'builtin-read.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 28 passed, 36 failed, 0 skipped
E           ============================================================
E           
E           Test: read builtin with no newline returns status 1 (line 52)
E           
E           stdout mismatch:
E             expected: 'status=1\nZZZ'
E             actual:   'status=0\nZZZ'
E           
E           Expected stdout: 'status=1\nZZZ'
E           Actual stdout:   'status=0\nZZZ\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is odd because the variable is populated successfully.  OSH/YSH might
E           # need a separate put reading feature that doesn't use IFS.
E           
E           echo -n ZZZ | { read x; echo status=$?; echo $x; }
E           ---
E           
E           ---
E           
E           Test: read -n doesn't strip whitespace (bug fix) (line 100)
E           
E           stdout mismatch:
E             expected: '[  a ]\n[  a b]\n[  a b ]\n\none var strips whitespace\n[a]\n[a b]\n[a b]\n\nthree vars\n[a] [] []\n[a] [b] []\n[a] [b] []'
E             actual:   '[]\n[]\n[]\n\none var strips whitespace\n[]\n[]\n[]\n\nthree vars\n[] [] []\n[] [] []\n[] [] []'
E           
E           Expected stdout: '[  a ]\n[  a b]\n[  a b ]\n\none var strips whitespace\n[a]\n[a b]\n[a b]\n\nthree vars\n[a] [] []\n[a] [b] []\n[a] [b] []'
E           Actual stdout:   '[]\n[]\n[]\n\none var strips whitespace\n[]\n[]\n[]\n\nthree vars\n[] [] []\n[] [] []\n[] [] []\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh) exit ;; esac
E           
E           echo '  a b  ' | (read -n 4; echo "[$REPLY]")
E           echo '  a b  ' | (read -n 5; echo "[$REPLY]")
E           echo '  a b  ' | (read -n 6; echo "[$REPLY]")
E           echo
E           
E           echo 'one var strips whitespace'
E           echo '  a b  ' | (read -n 4 myvar; echo "[$myvar]")
E           echo '  a b  ' | (read -n 5 myvar; echo "[$myvar]")
E           echo '  a b  ' | (read -n 6 myvar; echo "[$myvar]")
E           echo
E           
E           echo 'three vars'
E           echo '  a b  ' | (read -n 4 x y z; echo "[$x] [$y] [$z]")
E           echo '  a b  ' | (read -n 5 x y z; echo "[$x] [$y] [$z]")
E           echo '  a b  ' | (read -n 6 x y z; echo "[$x] [$y] [$z]")
E           ---
E           
E           ---
E           
E           Test: read -d -n - respects delimiter and splits (line 154)
E           
E           stdout mismatch:
E             expected: 'delim c\n[  a]\n[  a ]\n[  a b]\n\none var\n[a]\n[a]\n[a b]\n\nthree vars\n[a] [] []\n[a] [] []\n[a] [b] []'
E             actual:   'delim c\n[]\n[]\n[]\n\none var\n[]\n[]\n[]\n\nthree vars\n[] [] []\n[] [] []\n[] [] []'
E           
E           Expected stdout: 'delim c\n[  a]\n[  a ]\n[  a b]\n\none var\n[a]\n[a]\n[a b]\n\nthree vars\n[a] [] []\n[a] [] []\n[a] [b] []'
E           Actual stdout:   'delim c\n[]\n[]\n[]\n\none var\n[]\n[]\n[]\n\nthree vars\n[] [] []\n[] [] []\n[] [] []\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh|ash) exit ;; esac
E           
E           echo 'delim c'
E           echo '  a b c ' | (read -d 'c' -n 3; echo "[$REPLY]")
E           echo '  a b c ' | (read -d 'c' -n 4; echo "[$REPLY]")
E           echo '  a b c ' | (read -d 'c' -n 5; echo "[$REPLY]")
E           echo
E           
E           echo 'one var'
E           echo '  a b c ' | (read -d 'c' -n 3 myvar; echo "[$myvar]")
E           echo '  a b c ' | (read -d 'c' -n 4 myvar; echo "[$myvar]")
E           echo '  a b c ' | (read -d 'c' -n 5 myvar; echo "[$myvar]")
E           echo
E           
E           echo 'three vars'
E           echo '  a b c ' | (read -d 'c' -n 3 x y z; echo "[$x] [$y] [$z]")
E           echo '  a b c ' | (read -d 'c' -n 4 x y z; echo "[$x] [$y] [$z]")
E           echo '  a b c ' | (read -d 'c' -n 5 x y z; echo "[$x] [$y] [$z]")
E           ---
E           
E           ... and 33 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[builtin-set.test.sh] ___________________

test_file = 'builtin-set.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 11 passed, 13 failed, 0 skipped
E           ============================================================
E           
E           Test: set -u with undefined variable exits the interpreter (line 44)
E           
E           stdout mismatch:
E             expected: 'before\nOK\nbefore\nOK'
E             actual:   'before\nOK\nOK'
E           
E           Expected stdout: 'before\nOK\nbefore\nOK'
E           Actual stdout:   'before\nOK\nOK\n'
E           Expected stderr: None
E           Actual stderr:   'bash: x: unbound variable\nbash: -i: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # non-interactive
E           $SH -c 'set -u; echo before; echo $x; echo after'
E           if test $? -ne 0; then
E             echo OK
E           fi
E           
E           # interactive
E           $SH -i -c 'set -u; echo before; echo $x; echo after'
E           if test $? -ne 0; then
E             echo OK
E           fi
E           ---
E           
E           ---
E           
E           Test: set -u with undefined var in interactive shell does NOT exit the interpreter (line 66)
E           
E           stdout mismatch:
E             expected: 'before\nOK\nbefore\nline2'
E             actual:   'before\nOK\nOK'
E           
E           Expected stdout: 'before\nOK\nbefore\nline2'
E           Actual stdout:   'before\nOK\nOK\n'
E           Expected stderr: None
E           Actual stderr:   'bash: x: unbound variable\nbash: -i: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # In bash, it aborts the LINE only.  The next line is executed!
E           
E           # non-interactive
E           $SH -c 'set -u; echo before; echo $x; echo after
E           echo line2
E           '
E           if test $? -ne 0; then
E             echo OK
E           fi
E           
E           # interactive
E           $SH -i -c 'set -u; echo before; echo $x; echo after
E           echo line2
E           '
E           if test $? -ne 0; then
E             echo OK
E           fi
E           ---
E           
E           ---
E           
E           Test: set -u error can break out of nested evals (line 101)
E           
E           Execution error: Expected compound command as function body at line 1, column 27
E           
E           
E           Script:
E           ---
E           $SH -c '
E           set -u
E           test_function_2() {
E             x=$blarg
E           }
E           test_function() {
E             eval "test_function_2"
E           }
E           
E           echo before
E           eval test_function
E           echo after
E           '
E           # status must be non-zero: bash uses 1, ash/dash exit 2
E           if test $? -ne 0; then
E             echo OK
E           fi
E           ---
E           
E           ... and 10 more failures

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[builtin-special.test.sh] _________________

test_file = 'builtin-special.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 3 passed, 7 failed, 2 skipped
E           ============================================================
E           
E           Test: Prefix assignments persist after special builtins, like : (set -o posix) (line 30)
E           
E           stdout mismatch:
E             expected: 'foo=bar\nz='
E             actual:   'foo=\nz='
E           
E           Expected stdout: 'foo=bar\nz='
E           Actual stdout:   'foo=\nz=\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: posix: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in
E             bash) set -o posix ;;
E           esac
E           
E           foo=bar :
E           echo foo=$foo
E           
E           # Not true when you use 'builtin'
E           z=Z builtin :
E           echo z=$Z
E           ---
E           
E           ---
E           
E           Test: Prefix assignments persist after readonly, but NOT exported (set -o posix) (line 52)
E           
E           stdout mismatch:
E             expected: 'foo=bar\nspam=eggs\nbar\nNone'
E             actual:   'foo=\nspam=eggs\nNone\neggs'
E           
E           Expected stdout: 'foo=bar\nspam=eggs\nbar\nNone'
E           Actual stdout:   'foo=\nspam=eggs\nNone\neggs\n'
E           Expected stderr: None
E           Actual stderr:   'bash: set: posix: invalid option name\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Bash only implements it behind the posix option
E           case $SH in
E             bash) set -o posix ;;
E           esac
E           foo=bar readonly spam=eggs
E           echo foo=$foo
E           echo spam=$spam
E           
E           # should NOT be exported
E           printenv.py foo
E           printenv.py spam
E           ---
E           
E           ---
E           
E           Test: Prefix binding for exec is a special case (versus e.g. readonly) (line 79)
E           
E           stdout mismatch:
E             expected: 'pre1= x= pre2=pre2'
E             actual:   'pre1= x=x pre2=pre2'
E           
E           Expected stdout: 'pre1= x= pre2=pre2'
E           Actual stdout:   'pre1= x=x pre2=pre2\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           pre1=pre1 readonly x=x
E           pre2=pre2 exec sh -c 'echo pre1=$pre1 x=$x pre2=$pre2'
E           ---
E           
E           ... and 4 more failures

tests/spec_tests/test_spec.py:249: Failed
________________ test_bash_spec_file[builtin-type-bash.test.sh] ________________

test_file = 'builtin-type-bash.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 15 passed, 16 failed, 0 skipped
E           ============================================================
E           
E           Test: type -t -> keyword (line 27)
E           
E           Execution error: Expected command after ! at line 1, column 26
E           
E           
E           Script:
E           ---
E           type -t for time ! fi do {
E           ---
E           
E           ---
E           
E           Test: type -t doesn't find non-executable (like command -v) (line 67)
E           
E           stdout mismatch:
E             expected: 'file'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'file'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           PATH="$TMP:$PATH"
E           touch $TMP/non-executable
E           type -t non-executable
E           ---
E           
E           ---
E           
E           Test: type -p and -P builtin -> file (line 90)
E           
E           stdout mismatch:
E             expected: '/tmp/mv\n/tmp/tar\n/tmp/grep\n--\n/tmp/mv\n/tmp/tar\n/tmp/grep'
E             actual:   'mv\ntar\ngrep\n--\nmv is mv\ntar is tar\ngrep is grep'
E           
E           Expected stdout: '/tmp/mv\n/tmp/tar\n/tmp/grep\n--\n/tmp/mv\n/tmp/tar\n/tmp/grep'
E           Actual stdout:   'mv\ntar\ngrep\n--\nmv is mv\ntar is tar\ngrep is grep\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch /tmp/{mv,tar,grep}
E           chmod +x /tmp/{mv,tar,grep}
E           PATH=/tmp:$PATH
E           
E           type -p mv tar grep
E           echo --
E           type -P mv tar grep
E           ---
E           
E           ... and 13 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[builtin-type.test.sh] ___________________

test_file = 'builtin-type.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 3 failed, 2 skipped
E           ============================================================
E           
E           Test: type -> alias external (line 17)
E           
E           Execution error: Expected '}' to close command group at line 13, column 245
E           
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           shopt -s expand_aliases || true  # bash
E           
E           alias ll='ls -l'
E           
E           touch _tmp/date
E           chmod +x _tmp/date
E           PATH=_tmp:/bin
E           
E           normalize() {
E             # ignore quotes and backticks
E             # bash prints a left backtick
E             quotes='"`'\'
E             sed \
E               -e "s/[$quotes]//g" \
E               -e 's/shell function/function/' \
E               -e 's/is aliased to/is an alias for/'
E           }
E           
E           type ll date | normalize
E           
E           # Note: both procs and funcs go in var namespace?  So they don't respond to
E           # 'type'?
E           ---
E           
E           ---
E           
E           Test: type of relative path (line 51)
E           
E           stdout mismatch:
E             expected: '_tmp/ex is _tmp/ex'
E             actual:   'bash: type: _tmp/file: not found\nbash: type: _tmp/ex: not found'
E           
E           Expected stdout: '_tmp/ex is _tmp/ex'
E           Actual stdout:   'bash: type: _tmp/file: not found\nbash: type: _tmp/ex: not found\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           touch _tmp/file _tmp/ex
E           chmod +x _tmp/ex
E           
E           type _tmp/file _tmp/ex
E           
E           # dash and ash don't care if it's executable
E           # mksh
E           ---
E           
E           ---
E           
E           Test: type -> not found (line 77)
E           
E           stdout mismatch:
E             expected: 'status=1\nzz: not found'
E             actual:   'bash: type: zz: not found\nstatus=1'
E           
E           Expected stdout: 'status=1\nzz: not found'
E           Actual stdout:   'bash: type: zz: not found\nstatus=1\n'
E           Expected stderr: ''
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           type zz 2>err.txt
E           echo status=$?
E           
E           # for bash and OSH: print to stderr
E           fgrep -o 'zz: not found' err.txt || true
E           
E           # zsh and mksh behave the same - status 1
E           # dash and ash behave the same - status 127
E           ---

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[builtin-vars.test.sh] ___________________

test_file = 'builtin-vars.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 21 passed, 20 failed, 0 skipped
E           ============================================================
E           
E           Test: Export sets a global variable that persists after export -n (line 19)
E           
E           stdout mismatch:
E             expected: 'X\nX\nX\nNone'
E             actual:   'X\nX\nX\nX'
E           
E           Expected stdout: 'X\nX\nX\nNone'
E           Actual stdout:   'X\nX\nX\nX\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() { export GLOBAL=X; }
E           f
E           echo $GLOBAL
E           printenv.py GLOBAL
E           export -n GLOBAL
E           echo $GLOBAL
E           printenv.py GLOBAL
E           ---
E           
E           ---
E           
E           Test: Export a local that shadows a global (line 116)
E           
E           stdout mismatch:
E             expected: 'local1\nNone\nglobal'
E             actual:   'local1\nglobal\nglobal'
E           
E           Expected stdout: 'local1\nNone\nglobal'
E           Actual stdout:   'local1\nglobal\nglobal\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           V=global
E           f() {
E             local V=local1
E             export V
E             printenv.py V
E           }
E           f
E           printenv.py V  # exported local out of scope; global isn't exported yet
E           export V
E           printenv.py V  # now it's exported
E           ---
E           
E           ---
E           
E           Test: Unset exported variable, then define it again.  It's NOT still exported. (line 139)
E           
E           stdout mismatch:
E             expected: 'u\nNone\nnewvalue\nNone'
E             actual:   'u\nNone\nnewvalue\nnewvalue'
E           
E           Expected stdout: 'u\nNone\nnewvalue\nNone'
E           Actual stdout:   'u\nNone\nnewvalue\nnewvalue\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           export U
E           U=u
E           printenv.py U
E           unset U
E           printenv.py U
E           U=newvalue
E           echo $U
E           printenv.py U
E           ---
E           
E           ... and 17 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[case_.test.sh] ______________________

test_file = 'case_.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 10 passed, 3 failed, 0 skipped
E           ============================================================
E           
E           Test: Quoted literal in glob pattern (line 98)
E           
E           stdout mismatch:
E             expected: 'match'
E             actual:   ''
E           
E           Expected stdout: 'match'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='[ab].py'
E           pat='[ab].py'
E           case "$x" in
E             "$pat") echo match ;;
E           esac
E           ---
E           
E           ---
E           
E           Test: \(\) in pattern (regression) (line 211)
E           
E           stdout mismatch:
E             expected: 'match\nextglob'
E             actual:   'extglob'
E           
E           Expected stdout: 'match\nextglob'
E           Actual stdout:   'extglob\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s='foo()'
E           
E           case $s in
E             *\(\)) echo 'match'
E           esac
E           
E           case $SH in dash) exit ;; esac  # not implemented
E           
E           shopt -s extglob
E           
E           case $s in
E             *(foo|bar)'()') echo 'extglob'
E           esac
E           ---
E           
E           ---
E           
E           Test: case \n bug regression (line 234)
E           
E           Execution error: Expected word after 'case' at line 2, column 1
E           
E           
E           Script:
E           ---
E           case
E           in esac
E           ---

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[command-parsing.test.sh] _________________

test_file = 'command-parsing.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 3 passed, 2 failed, 0 skipped
E           ============================================================
E           
E           Test: Redirect on control flow (ignored in OSH) (line 41)
E           
E           stdout mismatch:
E             expected: 'REDIRECTED'
E             actual:   'NO'
E           
E           Expected stdout: 'REDIRECTED'
E           Actual stdout:   'NO\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f _tmp/r.txt
E           for x in a b c; do
E             break > _tmp/r.txt
E           done
E           if test -f _tmp/r.txt; then
E             echo REDIRECTED
E           else
E             echo NO
E           fi
E           ---
E           
E           ---
E           
E           Test: Redirect on control flow with ysh:all (no_parse_ignored) (line 55)
E           
E           stdout mismatch:
E             expected: 'REDIRECTED'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'REDIRECTED'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: shopt: ysh:all: invalid shell option name\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           shopt -s ysh:all
E           rm -f _tmp/r.txt
E           for x in a b c; do
E             break > _tmp/r.txt
E           done
E           test -f _tmp/r.txt && echo REDIRECTED
E           ---

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[command-sub-ksh.test.sh] _________________

test_file = 'command-sub-ksh.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: ${ echo hi;} (line 6)
E           
E           stdout mismatch:
E             expected: '[hi]\n\n[one\ntwo]\n\n[ 3\n 4 5 ]'
E             actual:   '[]\n\n[]\n\n[]\n\n[]'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '[hi]\n\n[one\ntwo]\n\n[ 3 \n 4 5 ]\n'
E           Actual stdout:   '[]\n\n[]\n\n[]\n\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           x=${ echo hi;}
E           echo "[$x]"
E           echo
E           
E           # trailing space allowed
E           x=${ echo one; echo two; }
E           echo "[$x]"
E           echo
E           
E           myfunc() {
E             echo ' 3 '
E             echo ' 4 5 '
E           }
E           
E           x=${ myfunc;}
E           echo "[$x]"
E           echo
E           
E           # SYNTAX ERROR
E           x=${myfunc;}
E           echo "[$x]"
E           ---
E           
E           ---
E           
E           Test: ${ echo hi }  without semi-colon (line 43)
E           
E           stdout mismatch:
E             expected: '[no-semi]\n[no-space]'
E             actual:   '[]\n[]'
E           status mismatch: expected 127, got 0
E           
E           Expected stdout: '[no-semi]\n[no-space]'
E           Actual stdout:   '[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 127
E           Actual status:   0
E           
E           Script:
E           ---
E           x=${ echo no-semi }
E           echo "[$x]"
E           
E           x=${ echo no-space}
E           echo "[$x]"
E           
E           # damn I wanted to take this over!  mksh executes it!
E           x=${ ~/ysh-tilde-sub }
E           
E           # echo ${ ~/ysh-tilde-sub }
E           ---
E           
E           ---
E           
E           Test: ${|REPLY=hi} (line 63)
E           
E           stdout mismatch:
E             expected: '[ reply var ]\n\n[from file]'
E             actual:   '[]\n\n[]\n\n[]'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '[ reply var ]\n\n[from file]\n'
E           Actual stdout:   '[]\n\n[]\n\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           x=${|y=" reply var "; REPLY=$y}
E           echo "[$x]"
E           echo
E           
E           echo '  from file  ' > tmp.txt
E           
E           x=${|read -r < tmp.txt}
E           echo "[$x]"
E           echo
E           
E           # SYNTAX ERROR
E           x=${ |REPLY=zz}
E           echo "[$x]"
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[command-sub.test.sh] ___________________

test_file = 'command-sub.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 17 passed, 13 failed, 0 skipped
E           ============================================================
E           
E           Test: case in subshell (line 7)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 5, column 46
E           
E           
E           Script:
E           ---
E           # Hm this subhell has to know about the closing ) and stuff like that.
E           # case_clause is a compound_command, which is a command.  And a subshell
E           # takes a compound_list, which is a list of terms, which has and_ors in them
E           # ... which eventually boils down to a command.
E           echo $(foo=a; case $foo in [0-9]) echo number;; [a-z]) echo letter ;; esac)
E           ---
E           
E           ---
E           
E           Test: Nested backticks (line 28)
E           
E           stdout mismatch:
E             expected: '000000-first'
E             actual:   '000000-first _keep'
E           
E           Expected stdout: '000000-first'
E           Actual stdout:   '000000-first _keep\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Inner `` are escaped!  Not sure how to do triple..  Seems like an unlikely
E           # use case.  Not sure if I even want to support this!
E           echo X > $TMP/000000-first
E           echo `\`echo -n l; echo -n s\` $TMP | grep 000000-first`
E           ---
E           
E           ---
E           
E           Test: Making keyword out of command sub should NOT work (line 40)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 33
E           
E           
E           Script:
E           ---
E           $(echo f)$(echo or) i in a b c; do echo $i; done
E           echo status=$?
E           ---
E           
E           ... and 10 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[command_.test.sh] _____________________

test_file = 'command_.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 15 failed, 0 skipped
E           ============================================================
E           
E           Test: Command block (line 5)
E           
E           stdout mismatch:
E             expected: '/bin/ls'
E             actual:   '/usr/bin/ls'
E           
E           Expected stdout: '/bin/ls'
E           Actual stdout:   '/usr/bin/ls\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           PATH=/bin
E           
E           { which ls; }
E           ---
E           
E           ---
E           
E           Test: Permission denied (line 11)
E           
E           status mismatch: expected 126, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: /tmp/text-file: command not found\n'
E           Expected status: 126
E           Actual status:   1
E           
E           Script:
E           ---
E           touch $TMP/text-file
E           $TMP/text-file
E           ---
E           
E           ---
E           
E           Test: Not a dir (line 16)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: /tmp/not-a-dir/text-file: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           $TMP/not-a-dir/text-file
E           ---
E           
E           ... and 12 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[dbracket.test.sh] _____________________

test_file = 'dbracket.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 34 passed, 15 failed, 0 skipped
E           ============================================================
E           
E           Test: [[ compare with literal -f (compare with test-builtin.test.sh) (line 166)
E           
E           Execution error: Expected ']]' to close conditional at line 3, column 12
E           
E           
E           Script:
E           ---
E           var=-f
E           [[ $var == -f ]] && echo true
E           [[ '-f' == $var ]] && echo true
E           ---
E           
E           ---
E           
E           Test: [[ with op variable (compare with test-builtin.test.sh) (line 175)
E           
E           Execution error: Expected ']]' to close conditional at line 3, column 6
E           
E           
E           Script:
E           ---
E           # Parse error -- parsed BEFORE evaluation of vars
E           op='=='
E           [[ a $op a ]] && echo true
E           [[ a $op b ]] || echo false
E           ---
E           
E           ---
E           
E           Test: [[ at runtime doesn't work (line 188)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 2, column 22
E           
E           
E           Script:
E           ---
E           dbracket=[[
E           $dbracket foo == foo ]]
E           ---
E           
E           ... and 12 more failures

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[dparen.test.sh] ______________________

test_file = 'dparen.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 9 passed, 6 failed, 0 skipped
E           ============================================================
E           
E           Test: bash: K in (( A[K] = V )) is a constant string (line 68)
E           
E           stdout mismatch:
E             expected: 'A[5]=\nkeys = K\nvalues = 42'
E             actual:   'A[5]=\nkeys = 5\nvalues = 42'
E           
E           Expected stdout: 'A[5]=\nkeys = K\nvalues = 42'
E           Actual stdout:   'A[5]=\nkeys = 5\nvalues = 42\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           K=5
E           V=42
E           typeset -A A
E           (( A[K] = V ))
E           
E           echo A["5"]=${A["5"]}
E           echo keys = ${!A[@]}
E           echo values = ${A[@]}
E           ---
E           
E           ---
E           
E           Test: bash: V in (( A["K"] = V )) gets coerced to integer (line 108)
E           
E           stdout mismatch:
E             expected: 'A["key"]=\nkeys = K\nvalues = 0'
E             actual:   'A["key"]=\nkeys = 0\nvalues = 0'
E           
E           Expected stdout: 'A["key"]=\nkeys = K\nvalues = 0'
E           Actual stdout:   'A["key"]=\nkeys = 0\nvalues = 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -u strict_arith || true
E           K=key
E           V=value
E           typeset -A A || exit 1
E           (( A["K"] = V ))
E           
E           # not there!
E           echo A[\"key\"]=${A[$K]}
E           
E           echo keys = ${!A[@]}
E           echo values = ${A[@]}
E           ---
E           
E           ---
E           
E           Test: literal strings inside (( )) (line 130)
E           
E           stdout mismatch:
E             expected: '42 0'
E             actual:   '42'
E           
E           Expected stdout: '42 0'
E           Actual stdout:   '42\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -A A
E           A['x']=42
E           (( x = A['x'] ))
E           (( A['y'] = 'y' ))  # y is a variable, gets coerced to 0
E           echo $x ${A['y']}
E           ---
E           
E           ... and 3 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[empty-bodies.test.sh] ___________________

test_file = 'empty-bodies.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 2 failed, 0 skipped
E           ============================================================
E           
E           Test: Empty do/done (line 3)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'empty'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'empty\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           while false; do
E           done
E           echo empty
E           ---
E           
E           ---
E           
E           Test: Empty then/fi (line 17)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'empty'
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'empty\n'
E           Expected stderr: None
E           Actual stderr:   'bash: foo: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           if foo; then
E           fi
E           echo empty
E           ---

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[errexit.test.sh] _____________________

test_file = 'errexit.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 22 passed, 13 failed, 0 skipped
E           ============================================================
E           
E           Test: errexit for nonexistent command (line 13)
E           
E           status mismatch: expected 127, got 1
E           
E           Expected stdout: ''
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: nonexistent__ZZ: command not found\n'
E           Expected status: 127
E           Actual status:   1
E           
E           Script:
E           ---
E           set -o errexit
E           nonexistent__ZZ
E           echo done
E           ---
E           
E           ---
E           
E           Test: errexit with { } (line 27)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   1
E           
E           Script:
E           ---
E           # This aborts because it's not part of an if statement.
E           set -o errexit
E           { echo one; false; echo two; }
E           ---
E           
E           ---
E           
E           Test: More && || (line 73)
E           
E           stdout mismatch:
E             expected: 'group\nstatus=1\n\nsubshell\nstatus=42\n\nstatus=1'
E             actual:   'status=1\n\nsubshell\nstatus=42\n\nstatus=1'
E           
E           Expected stdout: 'group\nstatus=1\n\nsubshell\nstatus=42\n\nstatus=1'
E           Actual stdout:   'status=1\n\nsubshell\nstatus=42\n\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: /bin/false: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c 'set -e; false || { echo group; false; }; echo bad'
E           echo status=$?
E           echo
E           
E           $SH -c 'set -e; false || ( echo subshell; exit 42 ); echo bad'
E           echo status=$?
E           echo
E           
E           # noforklast optimization
E           $SH -c 'set -e; false || /bin/false; echo bad'
E           echo status=$?
E           ---
E           
E           ... and 10 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[exit-status.test.sh] ___________________

test_file = 'exit-status.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 6 passed, 5 failed, 0 skipped
E           ============================================================
E           
E           Test: Truncating 'exit' status (line 10)
E           
E           stdout mismatch:
E             expected: 'status=255\nstatus=0\nstatus=1\n===\nstatus=255\nstatus=254'
E             actual:   ''
E           
E           Expected stdout: 'status=255\nstatus=0\nstatus=1\n===\nstatus=255\nstatus=254'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   255
E           
E           Script:
E           ---
E           $SH -c 'exit 255'
E           echo status=$?
E           
E           $SH -c 'exit 256'
E           echo status=$?
E           
E           $SH -c 'exit 257'
E           echo status=$?
E           
E           echo ===
E           
E           $SH -c 'exit -1'
E           echo status=$?
E           
E           $SH -c 'exit -2'
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: subshell OverflowError https://github.com/oilshell/oil/issues/996 (line 83)
E           
E           Execution error: return 255
E           
E           
E           Script:
E           ---
E           # We have to capture stderr here 
E           
E           filter_err() {
E             # check for bash/dash/mksh messages, and unwanted Python OverflowError
E             egrep -o 'Illegal number|bad number|return: can only|expected a small integer|OverflowError'
E             return 0
E           }
E           
E           # true; disables subshell optimization!
E           
E           # exit status too big, but integer isn't
E           $SH -c 'true; ( return 2147483647; )' 2>err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           # now integer is too big
E           $SH -c 'true; ( return 2147483648; )' 2> err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           # even bigger
E           $SH -c 'true; ( return 2147483649; )' 2> err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           echo
E           echo '--- negative ---'
E           
E           # negative vlaues
E           $SH -c 'true; ( return -2147483648; )' 2>err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           # negative vlaues
E           $SH -c 'true; ( return -2147483649; )' 2>err.txt
E           echo status=$?
E           cat err.txt | filter_err
E           
E           
E           # osh-cpp checks overflow, but osh-py doesn't
E           
E           
E           # mksh behaves similarly, uses '1' as its "bad status" status!
E           
E           
E           # dash is similar, but seems to reject negative numbers
E           
E           
E           # bash disallows return at top level
E           ---
E           
E           ---
E           
E           Test: If subshell true (line 263)
E           
E           stdout mismatch:
E             expected: 'TRUE'
E             actual:   'FALSE'
E           
E           Expected stdout: 'TRUE'
E           Actual stdout:   'FALSE\n'
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           if `true`; then echo TRUE; else echo FALSE; fi
E           ---
E           
E           ... and 2 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[extglob-files.test.sh] __________________

test_file = 'extglob-files.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 15 passed, 7 failed, 1 skipped
E           ============================================================
E           
E           Test: Two adjacent alternations (line 76)
E           
E           stdout mismatch:
E             expected: '2/ab 2/ac 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac'
E             actual:   '2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac'
E           
E           Expected stdout: '2/ab 2/ac 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac'
E           Actual stdout:   '2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac 2/bb 2/bc 2/cb 2/cc\n2/ab 2/ac\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           mkdir -p 2
E           touch 2/{aa,ab,ac,ba,bb,bc,ca,cb,cc}
E           echo 2/!(b)@(b|c)
E           echo 2/!(b)?@(b|c)  # wildcard in between
E           echo 2/!(b)a@(b|c)  # constant in between
E           ---
E           
E           ---
E           
E           Test: Glob other punctuation chars (lexer mode) (line 133)
E           
E           stdout mismatch:
E             expected: "['__#', '__&&', '__<>', '__aa', '__{}']"
E             actual:   "['@(__aa|__<>|__{}|__#|__&&|)']"
E           
E           Expected stdout: "['__#', '__&&', '__<>', '__aa', '__{}']"
E           Actual stdout:   "['@(__aa|__<>|__{}|__#|__&&|)']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           mkdir -p eg5
E           cd eg5
E           touch __{aa,'<>','{}','#','&&'}
E           argv.py @(__aa|'__<>'|__{}|__#|__&&|)
E           
E           # mksh sorts them differently
E           ---
E           
E           ---
E           
E           Test: Escaping of pipe (glibc bug, see demo/glibc_fnmatch.c) (line 159)
E           
E           stdout mismatch:
E             expected: "['__|', 'foo']\n['__|', 'foo']"
E             actual:   "['foo']\n['foo']"
E           
E           Expected stdout: "['__|', 'foo']\n['__|', 'foo']"
E           Actual stdout:   "['foo']\n['foo']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           
E           mkdir -p extpipe
E           cd extpipe
E           
E           touch '__|' foo
E           argv.py @('foo'|__\||bar)
E           argv.py @('foo'|'__|'|bar)
E           ---
E           
E           ... and 4 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[extglob-match.test.sh] __________________

test_file = 'extglob-match.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 25 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: case with extglob (line 219)
E           
E           stdout mismatch:
E             expected: 'A\nA\nU\nB\nC\nD'
E             actual:   ''
E           
E           Expected stdout: 'A\nA\nU\nB\nC\nD'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s extglob
E           for word in --help --verbose --unmatched -- -zxzx -; do
E             case $word in
E               --@(help|verbose) )
E                 echo A
E                 continue
E                 ;;
E               ( --?(b|c) )
E                 echo B
E                 continue
E                 ;;
E               ( -+(x|z) )
E                 echo C
E                 continue
E                 ;;
E               ( -*(x|z) )
E                 echo D
E                 continue
E                 ;;
E               *)
E                 echo U
E                 continue
E                 ;;
E             esac
E           done
E           ---
E           
E           ---
E           
E           Test: [[ $x == !($str) ]] (line 254)
E           
E           Execution error: Expected ']]' to close conditional at line 4, column 15
E           
E           
E           Script:
E           ---
E           shopt -s extglob
E           empty=''
E           str='x'
E           [[ $empty == !($str) ]] && echo TRUE  # test glob match
E           [[ $str == !($str) ]]   || echo FALSE
E           ---
E           
E           ---
E           
E           Test: Turning extglob on changes the meaning of [[ !(str) ]] in bash (line 265)
E           
E           stdout mismatch:
E             expected: 'TRUE\nFALSE\nTRUE\nTRUE'
E             actual:   'TRUE\nFALSE\nTRUE'
E           
E           Expected stdout: 'TRUE\nFALSE\nTRUE\nTRUE'
E           Actual stdout:   'TRUE\nFALSE\nTRUE\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           empty=''
E           str='x'
E           [[ !($empty) ]]  && echo TRUE   # test if $empty is empty
E           [[ !($str) ]]    || echo FALSE  # test if $str is empty
E           shopt -s extglob  # mksh doesn't have this
E           [[ !($empty) ]]  && echo TRUE   # negated glob
E           [[ !($str) ]]    && echo TRUE   # negated glob
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[fatal-errors.test.sh] ___________________

test_file = 'fatal-errors.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: Unrecoverable: divide by zero in redirect word (line 8)
E           
E           stdout mismatch:
E             expected: "inside=1\noutside=0\n## END:\n\n\n#### Unrecoverable: divide by zero in conditional word\n\n$SH -c '\nif test foo$(( 42 / 0 )) = foo; then\n  echo true\nelse\n  echo false\nfi\necho inside=$?\n'\necho outside=$?\n\necho ---\n\n$SH -c '\nif test foo$(( 42 / 0 )) = foo; then\n  echo true\nfi\necho inside=$?\n'\necho outside=$?"
E             actual:   ''
E           
E           Expected stdout: "inside=1\noutside=0\n## END:\n\n\n#### Unrecoverable: divide by zero in conditional word\n\n$SH -c '\nif test foo$(( 42 / 0 )) = foo; then\n  echo true\nelse\n  echo false\nfi\necho inside=$?\n'\necho outside=$?\n\necho ---\n\n$SH -c '\nif test foo$(( 42 / 0 )) = foo; then\n  echo true\nfi\necho inside=$?\n'\necho outside=$?\n"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: division by 0\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           $SH -c '
E           echo hi > file$(( 42 / 0 )) in
E           echo inside=$?
E           '
E           echo outside=$?
E           
E           
E           
E           # bash makes the command fail
E           
E           
E           # bash makes the command fail
E           ---
E           
E           ---
E           
E           Test: Unrecoverable: divide by zero in case (line 83)
E           
E           Execution error: Expected pattern in case item at line 1, column 22
E           
E           
E           Script:
E           ---
E           $SH -c '
E           case $(( 42 / 0 )) in
E             (*) echo hi ;;
E           esac
E           echo inside=$?
E           '
E           echo outside=$?
E           
E           echo ---
E           
E           $SH -c '
E           case foo in
E             ( $(( 42 / 0 )) )
E               echo hi
E               ;;
E           esac
E           echo inside=$?
E           '
E           echo outside=$?
E           ---
E           
E           ---
E           
E           Test: Unrecoverable: ${undef?message} (line 132)
E           
E           stdout mismatch:
E             expected: 'outside=127\noutside=127'
E             actual:   ''
E           
E           Expected stdout: 'outside=127\noutside=127'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: message\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           $SH -c '
E           echo ${undef?message}
E           echo inside=$?
E           '
E           echo outside=$?
E           
E           $SH -c '
E           case ${undef?message} in 
E             (*) echo hi ;;
E           esac
E           echo inside=$?
E           '
E           echo outside=$?
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[for-expr.test.sh] _____________________

test_file = 'for-expr.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 6 passed, 3 failed, 0 skipped
E           ============================================================
E           
E           Test: Accepts { } syntax too (line 43)
E           
E           Execution error: Expected 'do' in for loop at line 1, column 26
E           
E           
E           Script:
E           ---
E           for ((a=1; a <= 3; a++)) {
E             echo $a
E           }
E           ---
E           
E           ---
E           
E           Test: Arith lexer mode (line 96)
E           
E           stdout mismatch:
E             expected: '3\n4\n3\n4\n3\n4\n3\n4'
E             actual:   '3\n4\n3\n4\n3\n4'
E           
E           Expected stdout: '3\n4\n3\n4\n3\n4\n3\n4'
E           Actual stdout:   '3\n4\n3\n4\n3\n4\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash is lenient; zsh disagrees
E           
E           for ((i = '3';  i < '5';  ++i)); do echo $i; done
E           for ((i = "3";  i < "5";  ++i)); do echo $i; done
E           for ((i = $'3'; i < $'5'; ++i)); do echo $i; done
E           for ((i = $"3"; i < $"5"; ++i)); do echo $i; done
E           ---
E           
E           ---
E           
E           Test: Condition that's greater than 32 bits (line 156)
E           
E           Execution error: Expected 'done' to close for loop at line 4, column 3
E           
E           
E           Script:
E           ---
E           iters=0
E           
E           for ((i = 1 << 32; i; ++i)); do
E             echo $i
E             iters=$(( iters + 1 ))
E             if test $iters -eq 5; then
E               break
E             fi
E           done
E           ---

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[func-parsing.test.sh] ___________________

test_file = 'func-parsing.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 9 passed, 3 failed, 3 skipped
E           ============================================================
E           
E           Test: Hard case, function with } token in it (line 43)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 20
E           
E           
E           Script:
E           ---
E           rbrace() { echo }; }; rbrace
E           ---
E           
E           ---
E           
E           Test: Function name with $ (line 66)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           $foo-bar() { ls ; }
E           ---
E           
E           ---
E           
E           Test: Function name with command sub (line 71)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           foo-$(echo hi)() { ls ; }
E           ---

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[glob-bash.test.sh] ____________________

test_file = 'glob-bash.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 4 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: shopt -s failglob in loop context (line 31)
E           
E           stdout mismatch:
E             expected: '*.ZZ\nstatus=0\nstatus=1'
E             actual:   '*.ZZ\nstatus=0\nstatus=0'
E           
E           Expected stdout: '*.ZZ\nstatus=0\nstatus=1'
E           Actual stdout:   '*.ZZ\nstatus=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           for x in *.ZZ; do echo $x; done
E           echo status=$?
E           shopt -s failglob
E           for x in *.ZZ; do echo $x; done
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: shopt -s failglob in array literal context (line 49)
E           
E           stdout mismatch:
E             expected: '*.ZZ\nstatus=1'
E             actual:   '*.ZZ\nstatus=0'
E           
E           Expected stdout: '*.ZZ\nstatus=1'
E           Actual stdout:   '*.ZZ\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           myarr=(*.ZZ)
E           echo "${myarr[@]}"
E           shopt -s failglob
E           myarr=(*.ZZ)
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: shopt -s failglob exits properly in loop context with set -e (line 81)
E           
E           stdout mismatch:
E             expected: '*.ZZ\nstatus=0'
E             actual:   '*.ZZ\nstatus=0\nstatus=0'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '*.ZZ\nstatus=0'
E           Actual stdout:   '*.ZZ\nstatus=0\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           set -e
E           for x in *.ZZ; do echo $x; done
E           echo status=$?
E           
E           shopt -s failglob
E           for x in *.ZZ; do echo $x; done
E           echo status=$?
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[glob.test.sh] _______________________

test_file = 'glob.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 23 passed, 16 failed, 0 skipped
E           ============================================================
E           
E           Test: glob can expand to command and arg (line 37)
E           
E           stdout mismatch:
E             expected: 'spec/testdata/echo.sz'
E             actual:   ''
E           
E           Expected stdout: 'spec/testdata/echo.sz'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: spec/testdata/echo.s[hz]: command not found\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           spec/testdata/echo.s[hz]
E           ---
E           
E           ---
E           
E           Test: glob after var expansion (line 43)
E           
E           stdout mismatch:
E             expected: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E             actual:   '_tmp/*.A _tmp/*.B'
E           
E           Expected stdout: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E           Actual stdout:   '_tmp/*.A _tmp/*.B\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/a.A _tmp/aa.A _tmp/b.B
E           f="_tmp/*.A"
E           g="$f _tmp/*.B"
E           echo $g
E           ---
E           
E           ---
E           
E           Test: store literal globs in array then expand (line 76)
E           
E           stdout mismatch:
E             expected: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E             actual:   '_tmp/*.A _tmp/*.B'
E           
E           Expected stdout: '_tmp/a.A _tmp/aa.A _tmp/b.B'
E           Actual stdout:   '_tmp/*.A _tmp/*.B\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/a.A _tmp/aa.A _tmp/b.B
E           g=("_tmp/*.A" "_tmp/*.B")
E           echo ${g[@]}
E           ---
E           
E           ... and 13 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[globignore.test.sh] ____________________

test_file = 'globignore.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 16 failed, 1 skipped
E           ============================================================
E           
E           Test: Don't glob flags on file system with GLOBIGNORE (line 5)
E           
E           stdout mismatch:
E             expected: '-* hello zzzz?'
E             actual:   'hello zzzzz'
E           
E           Expected stdout: '-* hello zzzz?'
E           Actual stdout:   'hello zzzzz'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           touch _tmp/-n _tmp/zzzzz
E           cd _tmp
E           GLOBIGNORE=-*:zzzzz  # colon-separated pattern list
E           echo -* hello zzzz?
E           ---
E           
E           ---
E           
E           Test: Ignore *.txt (line 16)
E           
E           stdout mismatch:
E             expected: 'one.md foo/two.md foo/two.txt'
E             actual:   'one.md one.txt foo/two.md foo/two.txt'
E           
E           Expected stdout: 'one.md foo/two.md foo/two.txt'
E           Actual stdout:   'one.md one.txt foo/two.md foo/two.txt\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch one.md one.txt
E           mkdir -p foo
E           touch foo/{two.md,two.txt}
E           GLOBIGNORE=*.txt
E           echo *.* foo/*.*
E           ---
E           
E           ---
E           
E           Test: Ignore ?.txt (line 26)
E           
E           stdout mismatch:
E             expected: '10.txt foo/2.txt foo/20.txt'
E             actual:   '1.txt 10.txt foo/2.txt foo/20.txt'
E           
E           Expected stdout: '10.txt foo/2.txt foo/20.txt'
E           Actual stdout:   '1.txt 10.txt foo/2.txt foo/20.txt\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch {1,10}.txt
E           mkdir -p foo
E           touch foo/{2,20}.txt
E           GLOBIGNORE=?.txt
E           echo *.* foo/*.*
E           ---
E           
E           ... and 13 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[here-doc.test.sh] _____________________

test_file = 'here-doc.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 24 passed, 12 failed, 0 skipped
E           ============================================================
E           
E           Test: Here redirect with explicit descriptor (line 22)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'cat: 0: No such file or directory\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # A space between 0 and <<EOF causes it to pass '0' as an arg to cat.
E           cat 0<<EOF
E           one
E           EOF
E           ---
E           
E           ---
E           
E           Test: Here doc from another input file descriptor (line 29)
E           
E           stdout mismatch:
E             expected: '8: here doc on descriptor'
E             actual:   ''
E           
E           Expected stdout: '8: here doc on descriptor'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: OSH fails on descriptor 9, but not descriptor 8?  Is this because of
E           # the Python VM?  How  to inspect state?
E           read_from_fd.py 8  8<<EOF
E           here doc on descriptor
E           EOF
E           ---
E           
E           ---
E           
E           Test: Multiple here docs with different descriptors (line 37)
E           
E           Execution error: 'HereDocNode' object has no attribute 'parts'
E           
E           
E           Script:
E           ---
E           read_from_fd.py 0 3 <<EOF 3<<EOF3
E           fd0
E           EOF
E           fd3
E           EOF3
E           ---
E           
E           ... and 9 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[introspect.test.sh] ____________________

test_file = 'introspect.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 3 passed, 7 failed, 0 skipped
E           ============================================================
E           
E           Test: ${FUNCNAME[@]} array (line 28)
E           
E           stdout mismatch:
E             expected: "['f']\n['g', 'f']\n['f']"
E             actual:   '[]\n[]\n[]'
E           
E           Expected stdout: "['f']\n['g', 'f']\n['f']"
E           Actual stdout:   '[]\n[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           g() {
E             argv.py "${FUNCNAME[@]}"
E           }
E           f() {
E             argv.py "${FUNCNAME[@]}"
E             g
E             argv.py "${FUNCNAME[@]}"
E           }
E           f
E           ---
E           
E           ---
E           
E           Test: FUNCNAME with source (scalar or array) (line 44)
E           
E           stdout mismatch:
E             expected: "['  @', 'source', 'f', 'g']\n['  0', 'source']\n['${}', 'source']\n['  $', 'source']\n-----\n['  @']\n['  0', '']\n['${}', '']\n['  $', '']\n-----\n[]\n-----\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']"
E             actual:   "-----\n-----\n[]\n-----\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']"
E           
E           Expected stdout: "['  @', 'source', 'f', 'g']\n['  0', 'source']\n['${}', 'source']\n['  $', 'source']\n-----\n['  @']\n['  0', '']\n['${}', '']\n['  $', '']\n-----\n[]\n-----\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']"
E           Actual stdout:   "-----\n-----\n[]\n-----\n['  @', 'A']\n['  0', 'A']\n['${}', 'A']\n['  $', 'A']\n"
E           Expected stderr: None
E           Actual stderr:   'bash: source: spec/testdata/echo-funcname.sh: No such file or directory\nbash: source: spec/testdata/echo-funcname.sh: No such file or directory\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           cd $REPO_ROOT
E           
E           # Comments on bash quirk:
E           # https://github.com/oilshell/oil/pull/656#issuecomment-599162211
E           
E           f() {
E             . spec/testdata/echo-funcname.sh
E           }
E           g() {
E             f
E           }
E           
E           g
E           echo -----
E           
E           . spec/testdata/echo-funcname.sh
E           echo -----
E           
E           argv.py "${FUNCNAME[@]}"
E           
E           # Show bash inconsistency.  FUNCNAME doesn't behave like a normal array.
E           case $SH in 
E             (bash)
E               echo -----
E               a=('A')
E               argv.py '  @' "${a[@]}"
E               argv.py '  0' "${a[0]}"
E               argv.py '${}' "${a}"
E               argv.py '  $' "$a"
E               ;;
E           esac
E           ---
E           
E           ---
E           
E           Test: $((BASH_LINENO)) (scalar form in arith) (line 150)
E           
E           stdout mismatch:
E             expected: '4'
E             actual:   '0'
E           
E           Expected stdout: '4'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           check() {
E             echo $((BASH_LINENO))
E           }
E           check
E           ---
E           
E           ... and 4 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[loop.test.sh] _______________________

test_file = 'loop.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 15 passed, 14 failed, 0 skipped
E           ============================================================
E           
E           Test: implicit for loop (line 4)
E           
E           stdout mismatch:
E             expected: '1\n2\n3\nfinished=3'
E             actual:   'finished='
E           
E           Expected stdout: '1\n2\n3\nfinished=3'
E           Actual stdout:   'finished=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is like "for i in $@".
E           fun() {
E             for i; do
E               echo $i
E             done
E             echo "finished=$i"
E           }
E           fun 1 2 3
E           ---
E           
E           ---
E           
E           Test: the word 'in' can be the loop variable (line 39)
E           
E           Execution error: Expected variable name after 'for' at line 1, column 5
E           
E           
E           Script:
E           ---
E           for in in a b c; do
E             echo $in
E           done
E           echo finished=$in
E           ---
E           
E           ---
E           
E           Test: while in pipe with subshell (line 145)
E           
E           stdout mismatch:
E             expected: '3'
E             actual:   '0'
E           
E           Expected stdout: '3'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           i=0
E           seq 3 | ( while read foo; do
E             i=$((i+1))
E             #echo $i
E           done
E           echo $i )
E           ---
E           
E           ... and 11 more failures

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[nameref.test.sh] _____________________

test_file = 'nameref.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 11 passed, 19 failed, 2 skipped
E           ============================================================
E           
E           Test: pass array by reference (line 4)
E           
E           stdout mismatch:
E             expected: 'zo'
E             actual:   ''
E           
E           Expected stdout: 'zo'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_value() {
E             local -n array_name=$1
E             local idx=$2
E             echo "${array_name[$idx]}"
E           }
E           shadock=(ga bu zo meu)
E           show_value shadock 2
E           ---
E           
E           ---
E           
E           Test: mutate array by reference (line 14)
E           
E           stdout mismatch:
E             expected: 'a ZZZ c d'
E             actual:   'a b c d'
E           
E           Expected stdout: 'a ZZZ c d'
E           Actual stdout:   'a b c d\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set1() {
E             local -n array_name=$1
E             local val=$2
E             array_name[1]=$val
E           }
E           shadock=(a b c d)
E           set1 shadock ZZZ
E           echo ${shadock[@]}
E           ---
E           
E           ---
E           
E           Test: pass assoc array by reference (line 27)
E           
E           stdout mismatch:
E             expected: 'jam'
E             actual:   ''
E           
E           Expected stdout: 'jam'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_value() {
E             local -n array_name=$1
E             local idx=$2
E             echo "${array_name[$idx]}"
E           }
E           days=([monday]=eggs [tuesday]=bread [sunday]=jam)
E           show_value days sunday
E           #  mksh note: it coerces "days" to 0?  Horrible.
E           ---
E           
E           ... and 16 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[nix-idioms.test.sh] ____________________

test_file = 'nix-idioms.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 5 failed, 0 skipped
E           ============================================================
E           
E           Test: var ref to array 'preHooks[@]' (line 3)
E           
E           stdout mismatch:
E             expected: "show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n['foo bar', 'baz']"
E             actual:   "show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n[]"
E           
E           Expected stdout: "show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n['foo bar', 'baz']"
E           Actual stdout:   "show\n[]\n[]\nshow\n['foo', 'bar', 'baz']\n[]\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           #
E           # This idiom discussed on
E           # https://github.com/NixOS/nixpkgs/pull/147629
E           
E           show() {
E             echo show
E           
E             # These are actually different
E             argv.py ${!hooksSlice}
E           
E             argv.py ${!hooksSlice+"${!hooksSlice}"}
E           }
E           
E           hooksSlice='preHooks[@]'
E           
E           preHooks=()
E           show
E           
E           preHooks=('foo bar' baz)
E           show
E           
E           # WTF this exposes a difference?  But not the test case below?
E           
E           # What's happening here?
E           # Uncomment this and get an error in bash about hookSlice, even though we never
E           # undefined it.
E           
E           #wtf=1
E           #
E           # line 6: !hooksSlice: unbound variable
E           
E           if test -n "$wtf"; then
E             # 4.4.0(1)-release
E             # echo $BASH_VERSION
E           
E             set -u
E             preHooks=()
E             show
E           
E             preHooks=('foo bar' baz)
E             show
E           fi
E           ---
E           
E           ---
E           
E           Test: Similar to above with set -u (line 56)
E           
E           stdout mismatch:
E             expected: "show\n[]\nshow\n['foo bar', 'baz']"
E             actual:   'show\n[]\nshow\n[]'
E           
E           Expected stdout: "show\n[]\nshow\n['foo bar', 'baz']"
E           Actual stdout:   'show\n[]\nshow\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show() {
E             echo show
E           
E             # bash gives an error here - !hookSlice unbound, even though preHooks exists
E             # OSH currently does the "logical" thing
E           
E             # NOT testing this -- I think this is WHAT NIX WORKS AROUND WITH
E             #argv.py ${!hooksSlice}
E           
E             argv.py ${!hooksSlice+"${!hooksSlice}"}
E           }
E           
E           hooksSlice='preHooks[@]'
E           
E           set -u
E           preHooks=()
E           show
E           
E           preHooks=('foo bar' baz)
E           show
E           ---
E           
E           ---
E           
E           Test: ${!ref} to undefined string var is fatal, INCONSISTENT with array (line 103)
E           
E           stdout mismatch:
E             expected: '[]'
E             actual:   '[]\n[]\nend'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: '[]'
E           Actual stdout:   '[]\n[]\nend\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           hookSlice='preHooks'
E           
E           argv.py ${!hookSlice}
E           
E           set -u
E           
E           argv.py ${!hookSlice}
E           
E           echo end
E           ---
E           
E           ... and 2 more failures

tests/spec_tests/test_spec.py:249: Failed
________________ test_bash_spec_file[nocasematch-match.test.sh] ________________

test_file = 'nocasematch-match.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 3 passed, 3 failed, 0 skipped
E           ============================================================
E           
E           Test: [[ equality matching (line 6)
E           
E           stdout mismatch:
E             expected: '0\n0\n0\n0'
E             actual:   '1\n1\n1\n1'
E           
E           Expected stdout: '0\n0\n0\n0'
E           Actual stdout:   '1\n1\n1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s nocasematch
E           [[ a == A ]]; echo $?
E           [[ A == a ]]; echo $?
E           [[ A == [a] ]]; echo $?
E           [[ a == [A] ]]; echo $?
E           ---
E           
E           ---
E           
E           Test: [[ regex matching (line 19)
E           
E           stdout mismatch:
E             expected: '0\n0\n0\n0'
E             actual:   '1\n1\n1\n1'
E           
E           Expected stdout: '0\n0\n0\n0'
E           Actual stdout:   '1\n1\n1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s nocasematch
E           [[ a =~ A ]]; echo $?
E           [[ A =~ a ]]; echo $?
E           [[ a =~ [A] ]]; echo $?
E           [[ A =~ [a] ]]; echo $?
E           ---
E           
E           ---
E           
E           Test: case matching (line 41)
E           
E           stdout mismatch:
E             expected: '0\n0\n0\n0'
E             actual:   '1\n1\n1\n1'
E           
E           Expected stdout: '0\n0\n0\n0'
E           Actual stdout:   '1\n1\n1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s nocasematch
E           case a in A) echo 0 ;; *) echo 1 ;; esac
E           case A in a) echo 0 ;; *) echo 1 ;; esac
E           case a in [A]) echo 0 ;; *) echo 1 ;; esac
E           case A in [a]) echo 0 ;; *) echo 1 ;; esac
E           ---

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[nul-bytes.test.sh] ____________________

test_file = 'nul-bytes.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 2 passed, 14 failed, 0 skipped
E           ============================================================
E           
E           Test: NUL bytes with echo -e (line 5)
E           
E           stdout mismatch:
E             expected: '  \\0   -  \\n\n  00  2d  0a'
E             actual:   '  00  2d  0a'
E           
E           Expected stdout: '  \\0   -  \\n\n  00  2d  0a'
E           Actual stdout:   '  00  2d  0a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           show_hex() { od -A n -t c -t x1; }
E           
E           echo -e '\0-' | show_hex
E           #echo -e '\x00-'
E           #echo -e '\000-'
E           ---
E           
E           ---
E           
E           Test: printf - literal NUL in format string (line 27)
E           
E           stdout mismatch:
E             expected: '   x\n  78\n---\n   x\n  78\n---'
E             actual:   '  78  7a\n---\n  78  7a\n---\n  7a'
E           
E           Expected stdout: '   x\n  78\n---\n   x\n  78\n---'
E           Actual stdout:   '  78  7a\n---\n  78  7a\n---\n  7a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|ash) return ;; esac
E           
E           # Show both printable and hex
E           show_hex() { od -A n -t c -t x1; }
E           
E           printf $'x\U0z' | show_hex
E           echo ---
E           
E           printf $'x\U00z' | show_hex
E           echo ---
E           
E           printf $'\U0z' | show_hex
E           ---
E           
E           ---
E           
E           Test: printf - \0 escape shows NUL byte (line 63)
E           
E           stdout mismatch:
E             expected: '  \\0  \\n\n  00  0a'
E             actual:   '  00  0a'
E           
E           Expected stdout: '  \\0  \\n\n  00  0a'
E           Actual stdout:   '  00  0a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_hex() { od -A n -t c -t x1; }
E           
E           printf '\0\n' | show_hex
E           ---
E           
E           ... and 11 more failures

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[paren-ambiguity.test.sh] _________________

test_file = 'paren-ambiguity.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 5 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: (( closed with ) ) after multiple lines is command - #2337 (line 4)
E           
E           stdout mismatch:
E             expected: '1\n2\n3'
E             actual:   ''
E           
E           Expected stdout: '1\n2\n3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           (( echo 1
E           echo 2
E           (( x ))
E           : $(( x ))
E           echo 3
E           ) )
E           ---
E           
E           ---
E           
E           Test: $(( closed with ) ) after multiple lines is command - #2337 (line 18)
E           
E           stdout mismatch:
E             expected: '1 2 3'
E             actual:   '0'
E           
E           Expected stdout: '1 2 3'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $(( echo 1
E           echo 2
E           (( x ))
E           : $(( x ))
E           echo 3
E           ) )
E           ---
E           
E           ---
E           
E           Test: $(( closed with )) after multiple lines is parse error - #2337 (line 56)
E           
E           stdout mismatch:
E             expected: 'ok'
E             actual:   '0'
E           
E           Expected stdout: 'ok'
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           $SH -c '
E           echo $(( echo 1
E           echo 2
E           (( x ))
E           : $(( x ))
E           echo 3
E           ))
E           '
E           if test $? -ne 0; then
E             echo ok
E           fi
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[parse-errors.test.sh] ___________________

test_file = 'parse-errors.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 6 passed, 21 failed, 0 skipped
E           ============================================================
E           
E           Test: Long Token - 65535 bytes (line 4)
E           
E           stdout mismatch:
E             expected: '65535 out'
E             actual:   '0 out'
E           
E           Expected stdout: '65535 out'
E           Actual stdout:   '0 out\n'
E           Expected stderr: None
E           Actual stderr:   'bash: python2: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           python2 -c 'print("echo -n %s" % ("x" * 65535))' > tmp.sh
E           $SH tmp.sh > out
E           wc --bytes out
E           ---
E           
E           ---
E           
E           Test: Token that's too long for Oils - 65536 bytes (line 15)
E           
E           stdout mismatch:
E             expected: 'status=0\n65536 out'
E             actual:   'status=0\n0 out'
E           
E           Expected stdout: 'status=0\n65536 out'
E           Actual stdout:   'status=0\n0 out\n'
E           Expected stderr: None
E           Actual stderr:   'bash: python2: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           python2 -c 'print("echo -n %s" % ("x" * 65536))' > tmp.sh
E           $SH tmp.sh > out
E           echo status=$?
E           wc --bytes out
E           ---
E           
E           ---
E           
E           Test: Bad braced var sub -- not allowed (line 32)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: None
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           echo ${%}
E           ---
E           
E           ... and 18 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[pipeline.test.sh] _____________________

test_file = 'pipeline.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 14 passed, 12 failed, 0 skipped
E           ============================================================
E           
E           Test: While Loop ends pipeline (line 27)
E           
E           stdout mismatch:
E             expected: '.1\n.2\n.3'
E             actual:   ''
E           
E           Expected stdout: '.1\n.2\n.3'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           seq 3 | while read i
E           do
E             echo ".$i"
E           done
E           ---
E           
E           ---
E           
E           Test: Initial value of PIPESTATUS is empty string (line 53)
E           
E           stdout mismatch:
E             expected: 'pipestatus'
E             actual:   'pipestatus 0'
E           
E           Expected stdout: 'pipestatus'
E           Actual stdout:   'pipestatus 0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|zsh) exit ;; esac
E           
E           echo pipestatus ${PIPESTATUS[@]}
E           ---
E           
E           ---
E           
E           Test: |& (line 115)
E           
E           stdout mismatch:
E             expected: 'STDERR\nSTDOUT'
E             actual:   'STDOUT'
E           
E           Expected stdout: 'STDERR\nSTDOUT'
E           Actual stdout:   'STDOUT\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           stdout_stderr.py |& cat
E           ---
E           
E           ... and 9 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[posix.test.sh] ______________________

test_file = 'posix.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 11 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: Empty for loop without in.  Do can be on the same line I guess. (line 16)
E           
E           stdout mismatch:
E             expected: 'hi\na\nhi\nb'
E             actual:   ''
E           
E           Expected stdout: 'hi\na\nhi\nb'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- a b
E           for x do
E             echo hi
E             echo $x
E           done
E           ---
E           
E           ---
E           
E           Test: Empty action for case is syntax error (line 57)
E           
E           Execution error: Expected pattern in case item at line 5, column 4
E           
E           
E           Script:
E           ---
E           # POSIX grammar seems to allow this, but bash and dash don't.  Need ;;
E           foo=a
E           case $foo in
E             a)
E             b)
E               echo A ;;
E             d)
E           esac
E           ---
E           
E           ---
E           
E           Test: Bare semi-colon not allowed (line 86)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is disallowed by the grammar; bash and dash don't accept it.
E           ;
E           
E           
E           
E           #
E           # Explicit tests
E           #
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[quote.test.sh] ______________________

test_file = 'quote.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 28 passed, 6 failed, 1 skipped
E           ============================================================
E           
E           Test: Backslash escapes inside double quoted string (line 75)
E           
E           stdout mismatch:
E             expected: '$ \\ \\ \\p \\q'
E             actual:   '\\$ \\ \\ \\p \\q'
E           
E           Expected stdout: '$ \\ \\ \\p \\q'
E           Actual stdout:   '\\$ \\ \\ \\p \\q\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo "\$ \\ \\ \p \q"
E           ---
E           
E           ---
E           
E           Test: Unterminated double quote (line 124)
E           
E           status mismatch: expected 2, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           #
E           # TODO: Might be another section?
E           #
E           ---
E           
E           ---
E           
E           Test: $'' octal escapes don't have leading 0 (line 182)
E           
E           stdout mismatch:
E             expected: ' 001 377'
E             actual:   ' 001     303 277'
E           
E           Expected stdout: ' 001 377'
E           Actual stdout:   ' 001     303 277\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # echo -e syntax is echo -e \0377
E           echo -n $'\001' $'\377' | od -A n -c | sed 's/ \+/ /g'
E           ---
E           
E           ... and 3 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[redir-order.test.sh] ___________________

test_file = 'redir-order.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: subshell + redirect order (line 14)
E           
E           stdout mismatch:
E             expected: 'world'
E             actual:   'hello world'
E           
E           Expected stdout: 'world'
E           Actual stdout:   'hello world\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hello > OSCFLAGS
E           (echo `cat OSCFLAGS` "world") > OSCFLAGS
E           cat OSCFLAGS
E           ---
E           
E           ---
E           
E           Test: for word + redirect order (line 24)
E           
E           stdout mismatch:
E             expected: 'world'
E             actual:   'hello\nworld\nhello'
E           
E           Expected stdout: 'world'
E           Actual stdout:   'hello\nworld\nhello\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hello > OSCFLAGS
E           for x in `cat OSCFLAGS` world; do
E             echo $x
E           done > OSCFLAGS
E           cat OSCFLAGS
E           ---
E           
E           ---
E           
E           Test: case word + redirect order (line 36)
E           
E           stdout mismatch:
E             expected: 'other'
E             actual:   'hello\nhello'
E           
E           Expected stdout: 'other'
E           Actual stdout:   'hello\nhello\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo hello > OSCFLAGS
E           case `cat OSCFLAGS` in
E             hello)
E               echo hello
E               ;;
E             *)
E               echo other
E               ;;
E           esac > OSCFLAGS
E           cat OSCFLAGS
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
________________ test_bash_spec_file[redirect-command.test.sh] _________________

test_file = 'redirect-command.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 14 passed, 9 failed, 0 skipped
E           ============================================================
E           
E           Test: >$file touches a file (line 8)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=0'
E             actual:   'status=1\nstatus=1'
E           
E           Expected stdout: 'status=1\nstatus=0'
E           Actual stdout:   'status=1\nstatus=1\n'
E           Expected stderr: ''
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           rm -f myfile
E           test -f myfile
E           echo status=$?
E           
E           >myfile
E           test -f myfile
E           echo status=$?
E           
E           
E           
E           # regression for OSH
E           ---
E           
E           ---
E           
E           Test: $(< $file) yields the contents of the file (line 29)
E           
E           stdout mismatch:
E             expected: "['2\\n3']"
E             actual:   "['']"
E           
E           Expected stdout: "['2\\n3']"
E           Actual stdout:   "['']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           seq 2 3 > myfile
E           foo=$(< myfile)
E           argv.py "$foo"
E           ---
E           
E           ---
E           
E           Test: `< $file` behaves like $(< file) (line 43)
E           
E           stdout mismatch:
E             expected: '[7\n8]'
E             actual:   '[]'
E           
E           Expected stdout: '[7\n8]'
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           seq 7 8 > myfile
E           
E           x=`< myfile`
E           
E           echo "[$x]"
E           ---
E           
E           ... and 6 more failures

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[redirect-multi.test.sh] __________________

test_file = 'redirect-multi.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 11 failed, 1 skipped
E           ============================================================
E           
E           Test: File redirects with glob args (bash and zsh only) (line 3)
E           
E           stdout mismatch:
E             expected: 'hi\nescaped'
E             actual:   'escaped'
E           
E           Expected stdout: 'hi\nescaped'
E           Actual stdout:   'escaped\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           touch one-bar
E           
E           echo hi > one-*
E           
E           cat one-bar
E           
E           echo escaped > one-\*
E           
E           cat one-\*
E           ---
E           
E           ---
E           
E           Test: File redirect without matching any file, with failglob (line 58)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   'status=0\nzz-*-xx\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   'status=0\nzz-*-xx\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           shopt -s failglob
E           
E           echo hi > zz-*-xx
E           echo status=$?
E           
E           echo zz*
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: Redirect to $empty (in function body) (line 82)
E           
E           stdout mismatch:
E             expected: 'status=1'
E             actual:   'status=0'
E           
E           Expected stdout: 'status=1'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           empty=''
E           fun() { echo hi; } > $empty
E           fun
E           echo status=$?
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[redirect.test.sh] _____________________

test_file = 'redirect.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 21 passed, 19 failed, 1 skipped
E           ============================================================
E           
E           Test: >& and <& are the same (line 4)
E           
E           stderr mismatch:
E             expected: 'one\ntwo'
E             actual:   'one'
E           
E           Expected stdout: None
E           Actual stdout:   'two\n'
E           Expected stderr: 'one\ntwo'
E           Actual stderr:   'one\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo one 1>&2
E           
E           echo two 1<&2
E           ---
E           
E           ---
E           
E           Test: <& (line 16)
E           
E           stdout mismatch:
E             expected: '[foo51]'
E             actual:   '[]'
E           
E           Expected stdout: '[foo51]'
E           Actual stdout:   '[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Is there a simpler test case for this?
E           echo foo51 > $TMP/lessamp.txt
E           
E           exec 6< $TMP/lessamp.txt
E           read line <&6
E           
E           echo "[$line]"
E           ---
E           
E           ---
E           
E           Test: 2&>1 (is it a redirect or is it like a&>1) (line 38)
E           
E           stdout mismatch:
E             expected: 'status=127'
E             actual:   'status=1'
E           
E           Expected stdout: 'status=127'
E           Actual stdout:   'status=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           2&>1
E           echo status=$?
E           ---
E           
E           ... and 16 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[regex.test.sh] ______________________

test_file = 'regex.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 15 passed, 22 failed, 0 skipped
E           ============================================================
E           
E           Test: BASH_REMATCH (line 39)
E           
E           stdout mismatch:
E             expected: "status=0\n['foo123', 'foo', '123']\nstatus=1\n[]"
E             actual:   "status=0\n['foo123', 'foo', '123']\nstatus=1\n['foo123', 'foo', '123']"
E           
E           Expected stdout: "status=0\n['foo123', 'foo', '123']\nstatus=1\n[]"
E           Actual stdout:   "status=0\n['foo123', 'foo', '123']\nstatus=1\n['foo123', 'foo', '123']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           [[ foo123 =~ ([a-z]+)([0-9]+) ]]
E           echo status=$?
E           argv.py "${BASH_REMATCH[@]}"
E           
E           [[ failed =~ ([a-z]+)([0-9]+) ]]
E           echo status=$?
E           argv.py "${BASH_REMATCH[@]}"  # not cleared!
E           ---
E           
E           ---
E           
E           Test: Regex quoted with single quotes (line 74)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'true'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'true\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash doesn't like the quotes
E           [[ 'a b' =~ '^(a b)$' ]] && echo true
E           ---
E           
E           ---
E           
E           Test: Regex quoted with double quotes (line 82)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'true'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'true\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash doesn't like the quotes
E           [[ 'a b' =~ "^(a b)$" ]] && echo true
E           ---
E           
E           ... and 19 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[serialize.test.sh] ____________________

test_file = 'serialize.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 9 failed, 1 skipped
E           ============================================================
E           
E           Test: printf %q newline (line 12)
E           
E           stdout mismatch:
E             expected: "$'one\\ntwo'\nroundtrip-ok"
E             actual:   "$'one\\ntwo'"
E           
E           Expected stdout: "$'one\\ntwo'\nroundtrip-ok"
E           Actual stdout:   "$'one\\ntwo'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac  # yash and ash don't implement this
E           
E           newline=$'one\ntwo'
E           printf '%q\n' "$newline"
E           
E           quoted="$(printf '%q\n' "$newline")"
E           restored=$(eval "echo $quoted")
E           test "$newline" = "$restored" && echo roundtrip-ok
E           ---
E           
E           ---
E           
E           Test: printf %q spaces (line 36)
E           
E           stdout mismatch:
E             expected: 'one\\ two'
E             actual:   "$'one two'"
E           
E           Expected stdout: 'one\\ two'
E           Actual stdout:   "$'one two'\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac  # yash and ash don't implement this
E           
E           # bash does a weird thing and uses \
E           
E           spaces='one two'
E           printf '%q\n' "$spaces"
E           ---
E           
E           ---
E           
E           Test: printf %q quotes (line 52)
E           
E           stdout mismatch:
E             expected: '\\\'\\"\nroundtrip-ok'
E             actual:   '$\'\\\'"\''
E           
E           Expected stdout: '\\\'\\"\nroundtrip-ok'
E           Actual stdout:   '$\'\\\'"\'\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in ash) return ;; esac  # yash and ash don't implement %q
E           
E           quotes=\'\"
E           printf '%q\n' "$quotes"
E           
E           quoted="$(printf '%q\n' "$quotes")"
E           restored=$(eval "echo $quoted")
E           test "$quotes" = "$restored" && echo roundtrip-ok
E           ---
E           
E           ... and 6 more failures

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[sh-func.test.sh] _____________________

test_file = 'sh-func.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 8 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: Return statement (line 20)
E           
E           stdout mismatch:
E             expected: 'one'
E             actual:   ''
E           
E           Expected stdout: 'one'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 42
E           Actual status:   42
E           
E           Script:
E           ---
E           f() {
E             echo one
E             return 42
E             echo two
E           }
E           f
E           ---
E           
E           ---
E           
E           Test: return "" (a lot of disagreement) (line 95)
E           
E           stdout mismatch:
E             expected: 'f\nstatus=2'
E             actual:   'f\nstatus=1'
E           
E           Expected stdout: 'f\nstatus=2'
E           Actual stdout:   'f\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   'bash: return: : numeric argument required\n'
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo f
E             return ""
E           }
E           
E           f
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: return $empty (line 124)
E           
E           stdout mismatch:
E             expected: 'f\nstatus=0'
E             actual:   'status=0'
E           
E           Expected stdout: 'f\nstatus=0'
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           f() {
E             echo f
E             empty=
E             return $empty
E           }
E           
E           f
E           echo status=$?
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[sh-options-bash.test.sh] _________________

test_file = 'sh-options-bash.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 8 failed, 1 skipped
E           ============================================================
E           
E           Test: SHELLOPTS is updated when options are changed (line 4)
E           
E           stdout mismatch:
E             expected: '1\n0\n1'
E             actual:   '1\n1\n1'
E           
E           Expected stdout: '1\n0\n1'
E           Actual stdout:   '1\n1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo $SHELLOPTS | grep -q xtrace
E           echo $?
E           set -x
E           echo $SHELLOPTS | grep -q xtrace
E           echo $?
E           set +x
E           echo $SHELLOPTS | grep -q xtrace
E           echo $?
E           ---
E           
E           ---
E           
E           Test: SHELLOPTS is readonly (line 24)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   'status=0'
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: ''
E           Actual stdout:   'status=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           SHELLOPTS=x
E           echo status=$?
E           
E           # Setting a readonly variable in osh is a hard failure.
E           # just-bash also treats readonly assignment as fatal (matches osh)
E           ---
E           
E           ---
E           
E           Test: SHELLOPTS and BASHOPTS are non-empty (line 37)
E           
E           stdout mismatch:
E             expected: 'shellopts is set\nbashopts is set'
E             actual:   ''
E           
E           Expected stdout: 'shellopts is set\nbashopts is set'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: \n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           # 2024-06 - tickled by Samuel testing Gentoo
E           
E           if test -v SHELLOPTS; then
E             echo 'shellopts is set'
E           fi
E           if test -v BASHOPTS; then
E           	echo 'bashopts is set'
E           fi
E           
E           # bash: braceexpand:hashall etc.
E           
E           echo shellopts ${SHELLOPTS:?} > /dev/null
E           echo bashopts ${BASHOPTS:?} > /dev/null
E           ---
E           
E           ... and 5 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[sh-options.test.sh] ____________________

test_file = 'sh-options.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 13 passed, 25 failed, 1 skipped
E           ============================================================
E           
E           Test: $- with -c (line 7)
E           
E           stdout mismatch:
E             expected: 'huBc'
E             actual:   ''
E           status mismatch: expected 0, got 127
E           
E           Expected stdout: 'huBc'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: -o: No such file or directory\n'
E           Expected status: 0
E           Actual status:   127
E           
E           Script:
E           ---
E           # dash's behavior seems most sensible here?
E           $SH -o nounset -c 'echo $-'
E           ---
E           
E           ---
E           
E           Test: $- with pipefail (line 16)
E           
E           stdout mismatch:
E             expected: 'huBs'
E             actual:   ''
E           status mismatch: expected 0, got 1
E           
E           Expected stdout: 'huBs'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: -: unbound variable\n'
E           Expected status: 0
E           Actual status:   1
E           
E           Script:
E           ---
E           # Note: pipefail has no short flag in $-, we now include h (hashall), B (braceexpand), and s (stdin reading)
E           set -o pipefail -o nounset
E           echo $-
E           ---
E           
E           ---
E           
E           Test: $- and more options (line 27)
E           
E           stdout mismatch:
E             expected: 'yes\nyes\nyes\nyes'
E             actual:   ''
E           
E           Expected stdout: 'yes\nyes\nyes\nyes'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: set: -f: invalid option\n'
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           set -efuC
E           o=$-
E           [[ $o == *e* ]]; echo yes
E           [[ $o == *f* ]]; echo yes
E           [[ $o == *u* ]]; echo yes
E           [[ $o == *C* ]]; echo yes
E           ---
E           
E           ... and 22 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[shell-bugs.test.sh] ____________________

test_file = 'shell-bugs.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Test: ./configure idiom (line 17)
E           
E           stdout mismatch:
E             expected: 'supports -f\nenv'
E             actual:   ''
E           
E           Expected stdout: 'supports -f\nenv'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: -f: command not found\n\nreal\t0m0.000s\nuser\t0m0.000s\nsys\t0m0.000s\nbash: -f: command not found\n\nreal\t0m0.000s\nuser\t0m0.000s\nsys\t0m0.000s\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o errexit
E           
E           if command time -f '%e %M' true; then
E             echo 'supports -f'
E             # BUG: this was wrong
E             #time -f '%e %M' true
E           
E             # Need 'command time'
E             command time -f '%e %M' true
E           fi
E           
E           if env time -f '%e %M' true; then
E             echo 'env'
E             env time -f '%e %M' true
E           fi
E           ---

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[shell-grammar.test.sh] __________________

test_file = 'shell-grammar.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 33 passed, 5 failed, 0 skipped
E           ============================================================
E           
E           Test: Invalid token (line 35)
E           
E           Execution error: Maximum parse iterations exceeded (possible infinite loop) at line 1, column 1
E           
E           
E           Script:
E           ---
E           ;;
E           ---
E           
E           ---
E           
E           Test: If with then on same line missing semicolon (line 112)
E           
E           Execution error: Expected 'then' after condition at line 4, column 1
E           
E           
E           Script:
E           ---
E           # My ANTLR parsers fail to flag this.  The 'else' keyword should be unexpected.
E           if echo then
E             echo
E           else
E             echo
E           fi
E           ---
E           
E           ---
E           
E           Test: case item without ;; is not allowed (line 154)
E           
E           Execution error: Expected pattern in case item at line 3, column 9
E           
E           
E           Script:
E           ---
E           case word_a in
E             word_a)
E             word_b)
E               echo
E               ;;
E           esac
E           ---
E           
E           ... and 2 more failures

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[smoke.test.sh] ______________________

test_file = 'smoke.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 17 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Test: here doc with var (line 44)
E           
E           stdout mismatch:
E             expected: '"two\none'
E             actual:   'two\none'
E           
E           Expected stdout: '"two\none\n'
E           Actual stdout:   'two\none\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           v=one
E           tac <<EOF
E           $v
E           "two
E           EOF
E           ---

tests/spec_tests/test_spec.py:249: Failed
_________________ test_bash_spec_file[strict-options.test.sh] __________________

test_file = 'strict-options.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 5 passed, 4 failed, 6 skipped
E           ============================================================
E           
E           Test: Sourcing a script that returns at the top level (line 37)
E           
E           Execution error: return 42
E           
E           
E           Script:
E           ---
E           # Create temp script inline - echoes its name and returns 42
E           cat > /tmp/return-helper.sh <<'SCRIPT'
E           echo return-helper.sh
E           return 42
E           SCRIPT
E           
E           echo one
E           . /tmp/return-helper.sh
E           echo $?
E           echo two
E           ---
E           
E           ---
E           
E           Test: return at top level is an error (line 76)
E           
E           Execution error: return 0
E           
E           
E           Script:
E           ---
E           return
E           echo "status=$?"
E           ---
E           
E           ---
E           
E           Test: empty argv WITHOUT strict_argv (line 95)
E           
E           stdout mismatch:
E             expected: 'status=0\nVarSub\nCommandSub\nVarSub FAILED\nCommandSub FAILED'
E             actual:   'status=1\nVarSub FAILED\nCommandSub FAILED'
E           
E           Expected stdout: 'status=0\nVarSub\nCommandSub\nVarSub FAILED\nCommandSub FAILED'
E           Actual stdout:   'status=1\nVarSub FAILED\nCommandSub FAILED\n'
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\nbash: : command not found\nbash: : command not found\nbash: : command not found\nbash: : command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x=''
E           $x
E           echo status=$?
E           
E           if $x; then
E             echo VarSub
E           fi
E           
E           if $(echo foo >/dev/null); then
E             echo CommandSub
E           fi
E           
E           if "$x"; then
E             echo VarSub
E           else
E             echo VarSub FAILED
E           fi
E           
E           if "$(echo foo >/dev/null)"; then
E             echo CommandSub
E           else
E             echo CommandSub FAILED
E           fi
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[temp-binding.test.sh] ___________________

test_file = 'temp-binding.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 3 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Test: FOO=bar $unset - temp binding, then empty argv from unquoted unset var (#2411) (line 161)
E           
E           stdout mismatch:
E             expected: 'alive!'
E             actual:   ''
E           
E           Expected stdout: 'alive!'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   'bash: : command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           foo=alive! $unset
E           echo $foo
E           ---

tests/spec_tests/test_spec.py:249: Failed
______________________ test_bash_spec_file[tilde.test.sh] ______________________

test_file = 'tilde.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 7 passed, 6 failed, 1 skipped
E           ============================================================
E           
E           Test: ~ expansion in readonly assignment (line 9)
E           
E           stdout mismatch:
E             expected: '/home/bob/src'
E             actual:   '~/src'
E           
E           Expected stdout: '/home/bob/src'
E           Actual stdout:   '~/src\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # dash fails here!
E           # http://stackoverflow.com/questions/8441473/tilde-expansion-doesnt-work-when-i-logged-into-gui
E           HOME=/home/bob
E           readonly const=~/src
E           echo $const
E           ---
E           
E           ---
E           
E           Test: No tilde expansion in word that looks like assignment but isn't (line 25)
E           
E           stdout mismatch:
E             expected: 'x=/home/bob'
E             actual:   'x=~'
E           
E           Expected stdout: 'x=/home/bob'
E           Actual stdout:   'x=~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash and mksh mistakenly expand here!
E           # bash fixes this in POSIX mode (gah).
E           # http://lists.gnu.org/archive/html/bug-bash/2016-06/msg00001.html
E           HOME=/home/bob
E           echo x=~
E           ---
E           
E           ---
E           
E           Test: ${undef:-~} (line 48)
E           
E           stdout mismatch:
E             expected: '/home/bar\n/home/bar/z\n~\n~'
E             actual:   '/home/bar\n/home/bar/z\n/home/bar\n~'
E           
E           Expected stdout: '/home/bar\n/home/bar/z\n~\n~'
E           Actual stdout:   '/home/bar\n/home/bar/z\n/home/bar\n~\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           HOME=/home/bar
E           echo ${undef:-~}
E           echo ${HOME:+~/z}
E           echo "${undef:-~}"
E           echo ${undef:-"~"}
E           ---
E           
E           ... and 3 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[type-compat.test.sh] ___________________

test_file = 'type-compat.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 4 passed, 3 failed, 0 skipped
E           ============================================================
E           
E           Test: declare -i with += (line 41)
E           
E           stdout mismatch:
E             expected: '[1  2 ]\n[3]\n[2]'
E             actual:   '[1  2 ]\n[12]\n[02]'
E           
E           Expected stdout: '[1  2 ]\n[3]\n[2]'
E           Actual stdout:   '[1  2 ]\n[12]\n[02]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare s
E           s='1 '
E           s+=' 2 '  # string append
E           
E           declare -i i
E           i='1 '
E           i+=' 2 '  # arith add
E           
E           declare -i j
E           j=x  # treated like zero
E           j+=' 2 '  # arith add
E           
E           echo "[$s]"
E           echo [$i]
E           echo [$j]
E           ---
E           
E           ---
E           
E           Test: append in arith context (line 90)
E           
E           stdout mismatch:
E             expected: '3|3|2'
E             actual:   '0|0|0'
E           
E           Expected stdout: '3|3|2'
E           Actual stdout:   '0|0|0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare s
E           (( s='1 '))
E           (( s+=' 2 '))  # arith add
E           declare -i i
E           (( i='1 ' ))
E           (( i+=' 2 ' ))
E           declare -i j
E           (( j='x ' ))  # treated like zero
E           (( j+=' 2 ' ))
E           echo "$s|$i|$j"
E           ---
E           
E           ---
E           
E           Test: declare array vs. associative array (line 120)
E           
E           stdout mismatch:
E             expected: "['1', '0', 'd']\n['2', 'a', 'c', 'b', 'd']"
E             actual:   "['0', 'a', 'c']\n['2', 'a', 'c', 'b', 'd']"
E           
E           Expected stdout: "['1', '0', 'd']\n['2', 'a', 'c', 'b', 'd']"
E           Actual stdout:   "['0', 'a', 'c']\n['2', 'a', 'c', 'b', 'd']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Hm I don't understand why the array only has one element.  I guess because
E           # index 0 is used twice?
E           declare -a 'array=([a]=b [c]=d)'
E           declare -A 'assoc=([a]=b [c]=d)'
E           argv.py "${#array[@]}" "${!array[@]}" "${array[@]}"
E           argv.py "${#assoc[@]}" "${!assoc[@]}" "${assoc[@]}"
E           ---

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[var-num.test.sh] _____________________

test_file = 'var-num.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 5 passed, 2 failed, 0 skipped
E           ============================================================
E           
E           Test: $0 with stdin (line 23)
E           
E           stdout mismatch:
E             expected: 'sh'
E             actual:   ''
E           
E           Expected stdout: 'sh'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           echo 'echo $0' | $SH | grep -o 'sh$'
E           ---
E           
E           ---
E           
E           Test: $0 with -i (line 27)
E           
E           stdout mismatch:
E             expected: 'sh'
E             actual:   ''
E           
E           Expected stdout: 'sh'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           echo 'echo $0' | $SH -i | grep -o 'sh$'
E           ---

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[var-op-bash.test.sh] ___________________

test_file = 'var-op-bash.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 9 passed, 18 failed, 0 skipped
E           ============================================================
E           
E           Test: Case folding - multi code point (line 60)
E           
E           stdout mismatch:
E             expected: 'shell\nu ß\nU ß\nl ß\nL ß\n\npython2\nß\nß'
E             actual:   'shell\nu SS\nU SS\nl ß\nL ß\n\npython2'
E           
E           Expected stdout: 'shell\nu ß\nU ß\nl ß\nL ß\n\npython2\nß\nß\n'
E           Actual stdout:   'shell\nu SS\nU SS\nl ß\nL ß\n\npython2\n\n'
E           Expected stderr: None
E           Actual stderr:   'bash: python2: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           echo shell
E           small=$'\u00DF'
E           echo u ${small^}
E           echo U ${small^^}
E           
E           echo l ${small,}
E           echo L ${small,,}
E           echo
E           
E           echo python2
E           python2 -c '
E           small = u"\u00DF"
E           print(small.upper().encode("utf-8"))
E           print(small.lower().encode("utf-8"))
E           '
E           echo
E           
E           # Not in the container images, but python 3 DOES support it!
E           # This is moved to demo/survey-case-fold.sh
E           
E           if false; then
E           echo python3
E           python3 -c '
E           import sys
E           small = u"\u00DF"
E           sys.stdout.buffer.write(small.upper().encode("utf-8") + b"\n")
E           sys.stdout.buffer.write(small.lower().encode("utf-8") + b"\n")
E           '
E           fi
E           
E           if false; then
E             # Yes, supported
E             echo node.js
E           
E             nodejs -e '
E             var small = "\u00DF"
E             console.log(small.toUpperCase())
E             console.log(small.toLowerCase())
E             '
E           fi
E           ---
E           
E           ---
E           
E           Test: Lower Case with constant string (VERY WEIRD) (line 142)
E           
E           stdout mismatch:
E             expected: 'aAA ABC DEF\naaa aBC DEF'
E             actual:   'aAA ABC DEF\naaa abc def'
E           
E           Expected stdout: 'aAA ABC DEF\naaa aBC DEF'
E           Actual stdout:   'aAA ABC DEF\naaa abc def\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           x='AAA ABC DEF'
E           echo ${x,A}
E           echo ${x,,A}  # replaces every A only?
E           ---
E           
E           ---
E           
E           Test: Lower Case glob (line 151)
E           
E           stdout mismatch:
E             expected: 'ABC DEF\nABC DEF'
E             actual:   'aBC DEF\nabc def'
E           
E           Expected stdout: 'ABC DEF\nABC DEF'
E           Actual stdout:   'aBC DEF\nabc def\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # Hm with C.UTF-8, this does no case folding?
E           export LC_ALL=en_US.UTF-8
E           
E           x='ABC DEF'
E           echo ${x,[d-f]}
E           echo ${x,,[d-f]}  # bash 4.4 fixed in bash 5.2.21
E           ---
E           
E           ... and 15 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[var-op-len.test.sh] ____________________

test_file = 'var-op-len.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 5 passed, 2 failed, 0 skipped
E           ============================================================
E           
E           Test: Length operator can't be followed by test operator (line 55)
E           
E           stdout mismatch:
E             expected: ''
E             actual:   '0\n0\n0'
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '0\n0\n0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           echo ${#x-default}
E           
E           x=''
E           echo ${#x-default}
E           
E           x='foo'
E           echo ${#x-default}
E           ---
E           
E           ---
E           
E           Test: ${#s} respects LC_ALL - length in bytes or code points (line 80)
E           
E           stdout mismatch:
E             expected: 'len=1\nlen=2\n\nlen=1\nlen=4'
E             actual:   'len=1\nlen=1\n\nlen=1\nlen=1'
E           
E           Expected stdout: 'len=1\nlen=2\n\nlen=1\nlen=4\n'
E           Actual stdout:   'len=1\nlen=1\n\nlen=1\nlen=1\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           # This test case is sorta "infected" because spec-common.sh sets LC_ALL=C.UTF-8
E           #
E           # For some reason mksh behaves differently
E           #
E           # See demo/04-unicode.sh
E           
E           #echo $LC_ALL
E           unset LC_ALL 
E           
E           # note: this may depend on the CI machine config
E           LANG=en_US.UTF-8
E           
E           #LC_ALL=en_US.UTF-8
E           
E           for s in $'\u03bc' $'\U00010000'; do
E             LC_ALL=
E             echo "len=${#s}"
E           
E             LC_ALL=C
E             echo "len=${#s}"
E           
E             echo
E           done
E           ---

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[var-op-patsub.test.sh] __________________

test_file = 'var-op-patsub.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 14 passed, 14 failed, 0 skipped
E           ============================================================
E           
E           Test: Pattern replacement (line 7)
E           
E           stdout mismatch:
E             expected: 'abXX'
E             actual:   'abXXde'
E           
E           Expected stdout: 'abXX'
E           Actual stdout:   'abXXde\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           v=abcde
E           echo ${v/c*/XX}
E           ---
E           
E           ---
E           
E           Test: Replace is longest match (line 59)
E           
E           stdout mismatch:
E             expected: 'begin [] end'
E             actual:   'begin []</html> end'
E           
E           Expected stdout: 'begin [] end'
E           Actual stdout:   'begin []</html> end\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # If it were shortest, then you would just replace the first <html>
E           s='begin <html></html> end'
E           echo ${s/<*>/[]}
E           ---
E           
E           ---
E           
E           Test: Replace hard glob (line 71)
E           
E           stdout mismatch:
E             expected: 'aa__cc'
E             actual:   '__cc'
E           
E           Expected stdout: 'aa__cc'
E           Actual stdout:   '__cc\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           s='aa*bb+cc'
E           echo ${s//\**+/__}  # Literal *, then any sequence of characters, then literal +
E           ---
E           
E           ... and 11 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[var-op-slice.test.sh] ___________________

test_file = 'var-op-slice.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 10 passed, 11 failed, 1 skipped
E           ============================================================
E           
E           Test: Cannot take length of substring slice (line 14)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   '0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # These are runtime errors, but we could make them parse time errors.
E           v=abcde
E           echo ${#v:1:3}
E           # zsh actually implements this!
E           ---
E           
E           ---
E           
E           Test: String slice with math (line 76)
E           
E           stdout mismatch:
E             expected: 'def'
E             actual:   ''
E           
E           Expected stdout: 'def'
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # I think this is the $(()) language inside?
E           i=1
E           foo=abcdefg
E           echo ${foo: i+4-2 : i + 2}
E           ---
E           
E           ---
E           
E           Test: Slice with an index that's an array -- silent a[0] decay (line 139)
E           
E           stdout mismatch:
E             expected: 'assigned\nde'
E             actual:   'assigned'
E           
E           Expected stdout: 'assigned\nde'
E           Actual stdout:   'assigned\n\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           i=(3 4 5)
E           mystr=abcdefg
E           echo assigned
E           echo ${mystr:$i:2}
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[var-op-strip.test.sh] ___________________

test_file = 'var-op-strip.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 18 passed, 11 failed, 0 skipped
E           ============================================================
E           
E           Test: Remove const suffix is vectorized on $@ array (line 22)
E           
E           stdout mismatch:
E             expected: "['1', '2', '3']"
E             actual:   "['1a', '2a', '3']"
E           
E           Expected stdout: "['1', '2', '3']"
E           Actual stdout:   "['1a', '2a', '3']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- 1a 2a 3a
E           argv.py ${@%a}
E           ---
E           
E           ---
E           
E           Test: Strip unicode prefix (line 62)
E           
E           stdout mismatch:
E             expected: '   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a'
E             actual:   '  2d  0a\n\n  2d  0a\n\n  2d  0a\n\n  2d  0a'
E           
E           Expected stdout: '   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a\n\n   -  \\n\n  2d  0a'
E           Actual stdout:   '  2d  0a\n\n  2d  0a\n\n  2d  0a\n\n  2d  0a\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           show_hex() { od -A n -t c -t x1; }
E           
E           # NOTE: LANG is set to utf-8.
E           # ? is a glob that stands for one character
E           
E           v='μ-'
E           echo ${v#?} | show_hex
E           echo
E           echo ${v##?} | show_hex
E           echo
E           
E           v='-μ'
E           echo ${v%?} | show_hex
E           echo
E           echo ${v%%?} | show_hex
E           ---
E           
E           ---
E           
E           Test: strip unquoted and quoted [ (line 158)
E           
E           Execution error: unterminated character set at position 0
E           
E           
E           Script:
E           ---
E           # I guess dash and mksh treat unquoted [ as an invalid glob?
E           var='[foo]'
E           echo ${var#[}
E           echo ${var#"["}
E           echo "${var#[}"
E           echo "${var#"["}"
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[var-op-test.test.sh] ___________________

test_file = 'var-op-test.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 22 passed, 11 failed, 0 skipped
E           ============================================================
E           
E           Test: Nix idiom ${!hooksSlice+"${!hooksSlice}"} - was workaround for obsolete bash 4.3 bug (line 263)
E           
E           stdout mismatch:
E             expected: "[]\n[]\n[]\n['42']"
E             actual:   '[]\n[]\n[]\n[]'
E           
E           Expected stdout: "[]\n[]\n[]\n['42']"
E           Actual stdout:   '[]\n[]\n[]\n[]\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash|mksh|zsh) exit ;; esac
E           
E           # https://oilshell.zulipchat.com/#narrow/stream/307442-nix/topic/Replacing.20bash.20with.20osh.20in.20Nixpkgs.20stdenv
E           
E           (argv.py ${!hooksSlice+"${!hooksSlice}"})
E           
E           hooksSlice=x
E           
E           argv.py ${!hooksSlice+"${!hooksSlice}"}
E           
E           declare -a hookSlice=()
E           
E           argv.py ${!hooksSlice+"${!hooksSlice}"}
E           
E           foo=42
E           bar=43
E           
E           declare -a hooksSlice=(foo bar spam eggs)
E           
E           argv.py ${!hooksSlice+"${!hooksSlice}"}
E           ---
E           
E           ---
E           
E           Test: array and - and + (line 311)
E           
E           stdout mismatch:
E             expected: "empty=minus\na1=\na1[0]=\na2= x\na3=3 4\n---\nempty=\na1=plus\na1[0]=plus\na2=plus\na3=plus\n---\nempty=\na1=plus\na2=plus\na3=plus\n---\n['minus']\n[]\n['']\n['plus']\n['']\n['plus']\n['', 'x']\n['plus']\n['3', '4']\n['plus']"
E             actual:   "empty=minus\na1=\na1[0]=minus\na2= x\na3=3 4\n---\nempty=\na1=plus\na1[0]=\na2=plus\na3=plus\n---\nempty=plus\na1=plus\na2=plus\na3=plus\n---\n['minus']\n['']\n['']\n['plus']\n['minus']\n['']\n[' x']\n['plus']\n['3 4']\n['plus']"
E           
E           Expected stdout: "empty=minus\na1=\na1[0]=\na2= x\na3=3 4\n---\nempty=\na1=plus\na1[0]=plus\na2=plus\na3=plus\n---\nempty=\na1=plus\na2=plus\na3=plus\n---\n['minus']\n[]\n['']\n['plus']\n['']\n['plus']\n['', 'x']\n['plus']\n['3', '4']\n['plus']"
E           Actual stdout:   "empty=minus\na1=\na1[0]=minus\na2= x\na3=3 4\n---\nempty=\na1=plus\na1[0]=\na2=plus\na3=plus\n---\nempty=plus\na1=plus\na2=plus\na3=plus\n---\n['minus']\n['']\n['']\n['plus']\n['minus']\n['']\n[' x']\n['plus']\n['3 4']\n['plus']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           shopt -s compat_array  # to refer to array as scalar
E           
E           empty=()
E           a1=('')
E           a2=('' x)
E           a3=(3 4)
E           echo empty=${empty[@]-minus}
E           echo a1=${a1[@]-minus}
E           echo a1[0]=${a1[0]-minus}
E           echo a2=${a2[@]-minus}
E           echo a3=${a3[@]-minus}
E           echo ---
E           
E           echo empty=${empty[@]+plus}
E           echo a1=${a1[@]+plus}
E           echo a1[0]=${a1[0]+plus}
E           echo a2=${a2[@]+plus}
E           echo a3=${a3[@]+plus}
E           echo ---
E           
E           echo empty=${empty+plus}
E           echo a1=${a1+plus}
E           echo a2=${a2+plus}
E           echo a3=${a3+plus}
E           echo ---
E           
E           # Test quoted arrays too
E           argv.py "${empty[@]-minus}"
E           argv.py "${empty[@]+plus}"
E           argv.py "${a1[@]-minus}"
E           argv.py "${a1[@]+plus}"
E           argv.py "${a1[0]-minus}"
E           argv.py "${a1[0]+plus}"
E           argv.py "${a2[@]-minus}"
E           argv.py "${a2[@]+plus}"
E           argv.py "${a3[@]-minus}"
E           argv.py "${a3[@]+plus}"
E           ---
E           
E           ---
E           
E           Test: $@ ("") and - and + (line 407)
E           
E           stdout mismatch:
E             expected: 'argv=\nargv=plus\nargv=minus\nargv='
E             actual:   'argv=minus\nargv=\nargv=minus\nargv='
E           
E           Expected stdout: 'argv=\nargv=plus\nargv=minus\nargv='
E           Actual stdout:   'argv=minus\nargv=\nargv=minus\nargv=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- ""
E           echo argv=${@-minus}
E           echo argv=${@+plus}
E           echo argv=${@:-minus}
E           echo argv=${@:+plus}
E           
E           # Zsh treats $@ as an array unlike Bash converting it to a string by joining it
E           # with a space.
E           ---
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[var-ref.test.sh] _____________________

test_file = 'var-ref.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 12 passed, 18 failed, 1 skipped
E           ============================================================
E           
E           Test: ${!ref-default} (line 17)
E           
E           stdout mismatch:
E             expected: 'x=default\nx=\nx=foo'
E             actual:   'x=\nx=\nx='
E           
E           Expected stdout: 'x=default\nx=\nx=foo'
E           Actual stdout:   'x=\nx=\nx=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           ref=x
E           echo x=${!ref-default}
E           
E           x=''
E           echo x=${!ref-default}
E           
E           x=foo
E           echo x=${!ref-default}
E           ---
E           
E           ---
E           
E           Test: ${!undef:-} (line 33)
E           
E           stdout mismatch:
E             expected: 'NOUNSET'
E             actual:   'undef=\nundef=\nNOUNSET\nundef=\nundef='
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: 'NOUNSET'
E           Actual stdout:   'undef=\nundef=\nNOUNSET\nundef=\nundef=\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash 4.4 gives empty string, but I feel like this could be an error
E           echo undef=${!undef-'default'}
E           echo undef=${!undef}
E           
E           set -u
E           echo NOUNSET
E           echo undef=${!undef-'default'}
E           echo undef=${!undef}
E           
E           
E           # Bash 4.4 had been generating an empty string, but it was fixed in Bash 5.0.
E           #
E           # ## BUG bash STDOUT:
E           # undef=default
E           # undef=
E           # NOUNSET
E           # undef=default
E           # ## END
E           ---
E           
E           ---
E           
E           Test: ${!a[@]-'default'} is legal but fails with more than one element (line 98)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1'
E             actual:   "['']\nstatus=0\n['']\nstatus=0"
E           
E           Expected stdout: 'status=1\nstatus=1'
E           Actual stdout:   "['']\nstatus=0\n['']\nstatus=0\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 0
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash allows this construct, but the indirection fails when the array has more
E           # than one element because the variable name contains a space.  OSH originally
E           # made it an error unconditionally because [@] implies it's an array, so the
E           # behavior has been different from Bash when the array has a single element.
E           # We now changed it to follow Bash even when the array has a single element.
E           
E           (argv.py "${!a[@]-default}")
E           echo status=$?
E           
E           a=(x y z)
E           (argv.py "${!a[@]-default}")
E           echo status=$?
E           
E           # Bash 4.4 had been generating an empty string for ${!undef[@]-}, but this was
E           # fixed in Bash 5.0.
E           #
E           # ## BUG bash status: 0
E           # ## BUG bash STDOUT:
E           # ['default']
E           # status=0
E           # status=1
E           # ## END
E           ---
E           
E           ... and 15 more failures

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[var-sub-quote.test.sh] __________________

test_file = 'var-sub-quote.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 24 passed, 17 failed, 0 skipped
E           ============================================================
E           
E           Test: array with empty values (line 27)
E           
E           stdout mismatch:
E             expected: "['', 'x', '', '']"
E             actual:   "['x']"
E           
E           Expected stdout: "['', 'x', '', '']"
E           Actual stdout:   "['x']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           declare -a A=('' x "" '')
E           argv.py "${A[@]}"
E           ---
E           
E           ---
E           
E           Test: Inner single quotes, outer double quotes (line 61)
E           
E           stdout mismatch:
E             expected: '["\'b\'"]'
E             actual:   "['b']"
E           
E           Expected stdout: '["\'b\'"]'
E           Actual stdout:   "['b']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This is the WEIRD ONE.  Single quotes appear outside.  But all shells agree!
E           argv.py "${Unset:-'b'}"
E           ---
E           
E           ---
E           
E           Test: Multiple words: no outer quotes, inner single quotes (line 78)
E           
E           stdout mismatch:
E             expected: "['a b c']"
E             actual:   "['a', 'b', 'c']"
E           
E           Expected stdout: "['a b c']"
E           Actual stdout:   "['a', 'b', 'c']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py ${Unset:-'a b c'}
E           ---
E           
E           ... and 14 more failures

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[var-sub.test.sh] _____________________

test_file = 'var-sub.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 5 failed, 0 skipped
E           ============================================================
E           
E           Test: Bad var sub (line 8)
E           
E           status mismatch: expected one of [2, 1], got 0
E           
E           Expected stdout: ''
E           Actual stdout:   '\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 2
E           Actual status:   0
E           
E           Script:
E           ---
E           echo ${a&}
E           ---
E           
E           ---
E           
E           Test: Braced block inside ${} (line 14)
E           
E           Execution error: Command substitution requires async expansion
E           
E           
E           Script:
E           ---
E           # NOTE: This bug was in bash 4.3 but fixed in bash 4.4.
E           echo ${foo:-$({ ls /bin/ls; })}
E           ---
E           
E           ---
E           
E           Test: Filename redirect with "$@" (line 24)
E           
E           status mismatch: expected 1, got 0
E           
E           Expected stdout: None
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: 1
E           Actual status:   0
E           
E           Script:
E           ---
E           # bash - ambiguous redirect -- yeah I want this error
E           #   - But I want it at PARSE time?  So is there a special DollarAtPart?
E           #     MultipleArgsPart?
E           # mksh - tries to create '_tmp/var-sub1 _tmp/var-sub2'
E           # dash - tries to create '_tmp/var-sub1 _tmp/var-sub2'
E           fun() {
E             echo hi > "$@"
E           }
E           fun _tmp/var-sub1 _tmp/var-sub2
E           ---
E           
E           ... and 2 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[vars-bash.test.sh] ____________________

test_file = 'vars-bash.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Test: $SHELL is set to what is in /etc/passwd (line 4)
E           
E           Execution error: Expected 'fi' to close if statement at line 1, column 26
E           
E           
E           Script:
E           ---
E           sh=$(which $SH)
E           
E           unset SHELL
E           
E           prog='
E           if test -n "$SHELL"; then
E             # the exact value is different on CI, so do not assert
E             echo SHELL is set
E             echo SHELL=$SHELL >&2
E           fi
E           '
E           
E           $SH -c "$prog"
E           
E           $SH -i -c "$prog"
E           
E           # make it a login shell
E           $SH -l -c "$prog"
E           ---

tests/spec_tests/test_spec.py:249: Failed
__________________ test_bash_spec_file[vars-special.test.sh] ___________________

test_file = 'vars-special.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 22 passed, 20 failed, 0 skipped
E           ============================================================
E           
E           Test: $PATH is set if unset at startup (line 31)
E           
E           stdout mismatch:
E             expected: 'yes\nyes'
E             actual:   ''
E           
E           Expected stdout: 'yes\nyes'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: /usr/bin/bash: command not found\n'
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # WORKAROUND for Python version of bin/osh -- we can't run bin/oils_for_unix.py
E           # because it a shebang #!/usr/bin/env python2
E           # This test is still useful for the C++ oils-for-unix.
E           
E           case $SH in
E             */bin/osh)
E               echo yes
E               echo yes
E               exit
E               ;;
E           esac
E           
E           # Get absolute path before changing PATH
E           sh=$(which $SH)
E           
E           old_path=$PATH
E           unset PATH
E           
E           $sh -c 'echo $PATH' > path.txt
E           
E           PATH=$old_path
E           
E           # looks like PATH=/usr/bin:/bin for mksh, but more complicated for others
E           # cat path.txt
E           
E           # should contain /usr/bin
E           if egrep -q '(^|:)/usr/bin($|:)' path.txt; then
E             echo yes
E           fi
E           
E           # should contain /bin
E           if egrep -q '(^|:)/bin($|:)' path.txt ; then
E             echo yes
E           fi
E           ---
E           
E           ---
E           
E           Test: $HOME is NOT set (line 73)
E           
E           stdout mismatch:
E             expected: 'status=0\nstatus=1\nstatus=1'
E             actual:   'status=1\nHOME=/tmp\nstatus=0\nstatus=1'
E           
E           Expected stdout: 'status=0\nstatus=1\nstatus=1'
E           Actual stdout:   'status=1\nHOME=/tmp\nstatus=0\nstatus=1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           case $SH in *zsh) echo 'zsh sets HOME'; exit ;; esac
E           
E           home=$(echo $HOME)
E           test "$home" = ""
E           echo status=$?
E           
E           env | grep HOME
E           echo status=$?
E           
E           # not in interactive shell either
E           $SH -i -c 'echo $HOME' | grep /
E           echo status=$?
E           ---
E           
E           ---
E           
E           Test: Vars set interactively only: $HISTFILE (line 97)
E           
E           stdout mismatch:
E             expected: 'histfile=\nhistfile=yes'
E             actual:   ''
E           
E           Expected stdout: 'histfile=\nhistfile=yes'
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   'bash: --norc: No such file or directory\nbash: --norc: No such file or directory\n'
E           Expected status: None
E           Actual status:   127
E           
E           Script:
E           ---
E           case $SH in dash|mksh|zsh) exit ;; esac
E           
E           $SH --norc --rcfile /dev/null -c 'echo histfile=${HISTFILE:+yes}'
E           $SH --norc --rcfile /dev/null -i -c 'echo histfile=${HISTFILE:+yes}'
E           ---
E           
E           ... and 17 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[whitespace.test.sh] ____________________

test_file = 'whitespace.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 0 passed, 5 failed, 0 skipped
E           ============================================================
E           
E           Test: Parsing shell words \r \v (line 3)
E           
E           stdout mismatch:
E             expected: "['-', '-']\n['-\\r-']\n['-\\x0b-']\n['-\\x0c-']"
E             actual:   ''
E           
E           Expected stdout: "['-', '-']\n['-\\r-']\n['-\\x0b-']\n['-\\x0c-']"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # frontend/lexer_def.py has rules for this
E           
E           tab=$(python2 -c 'print "argv.py -\t-"')
E           cr=$(python2 -c 'print "argv.py -\r-"')
E           vert=$(python2 -c 'print "argv.py -\v-"')
E           ff=$(python2 -c 'print "argv.py -\f-"')
E           
E           $SH -c "$tab"
E           $SH -c "$cr"
E           $SH -c "$vert"
E           $SH -c "$ff"
E           ---
E           
E           ---
E           
E           Test: \r in arith expression is allowed by some shells, but not most! (line 25)
E           
E           stdout mismatch:
E             expected: "['3']\nfailed"
E             actual:   ''
E           
E           Expected stdout: "['3']\nfailed"
E           Actual stdout:   ''
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           arith=$(python2 -c 'print "argv.py $(( 1 +\n2))"')
E           arith_cr=$(python2 -c 'print "argv.py $(( 1 +\r\n2))"')
E           
E           $SH -c "$arith"
E           if test $? -ne 0; then
E             echo 'failed'
E           fi
E           
E           $SH -c "$arith_cr"
E           if test $? -ne 0; then
E             echo 'failed'
E           fi
E           ---
E           
E           ---
E           
E           Test: whitespace in string to integer conversion (line 51)
E           
E           stdout mismatch:
E             expected: '43\nfailed'
E             actual:   '1\n1'
E           
E           Expected stdout: '43\nfailed'
E           Actual stdout:   '1\n1\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           tab=$(python2 -c 'print "\t42\t"')
E           cr=$(python2 -c 'print "\r42\r"')
E           
E           $SH -c 'echo $(( $1 + 1 ))' dummy0 "$tab"
E           if test $? -ne 0; then
E             echo 'failed'
E           fi
E           
E           $SH -c 'echo $(( $1 + 1 ))' dummy0 "$cr"
E           if test $? -ne 0; then
E             echo 'failed'
E           fi
E           ---
E           
E           ... and 2 more failures

tests/spec_tests/test_spec.py:249: Failed
____________________ test_bash_spec_file[word-eval.test.sh] ____________________

test_file = 'word-eval.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 4 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Test: Word joining (line 35)
E           
E           stdout mismatch:
E             expected: "['1', '2a1', 'a2_x', 'y', 'z']"
E             actual:   "['1 2a1 a2_x', 'y', 'z']"
E           
E           Expected stdout: "['1', '2a1', 'a2_x', 'y', 'z']"
E           Actual stdout:   "['1 2a1 a2_x', 'y', 'z']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- x y z
E           s1='1 2'
E           array=(a1 a2)
E           argv.py $s1"${array[@]}"_"$@"
E           ---
E           
E           ---
E           
E           Test: Default values -- more cases (line 49)
E           
E           stdout mismatch:
E             expected: '[\'hi\', \'a b\', \'c d\', \'e f\', "\'g h\'"]'
E             actual:   "['hi', 'a', 'b', 'c d', 'e f', 'g h']"
E           
E           Expected stdout: '[\'hi\', \'a b\', \'c d\', \'e f\', "\'g h\'"]'
E           Actual stdout:   "['hi', 'a', 'b', 'c d', 'e f', 'g h']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           argv.py ${undef:-hi} ${undef:-'a b'} "${undef:-c d}" "${un:-"e f"}" "${un:-'g h'}"
E           ---
E           
E           ---
E           
E           Test: Globbing after splitting (line 53)
E           
E           stdout mismatch:
E             expected: "['_tmp/foo.hh', '_tmp/bar.gg', '_tmp/foo.gg']"
E             actual:   "['_tmp/*.hh', '_tmp/*.gg']"
E           
E           Expected stdout: "['_tmp/foo.hh', '_tmp/bar.gg', '_tmp/foo.gg']"
E           Actual stdout:   "['_tmp/*.hh', '_tmp/*.gg']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           mkdir -p _tmp
E           touch _tmp/foo.gg _tmp/bar.gg _tmp/foo.hh
E           pat='_tmp/*.hh _tmp/*.gg'
E           argv.py $pat
E           ---
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[word-split.test.sh] ____________________

test_file = 'word-split.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 30 passed, 25 failed, 0 skipped
E           ============================================================
E           
E           Test: $* with empty IFS (line 73)
E           
E           stdout mismatch:
E             expected: "['1 2', '3  4']\n['1 23  4']"
E             actual:   "['1 23  4']\n['1 23  4']"
E           
E           Expected stdout: "['1 2', '3  4']\n['1 23  4']"
E           Actual stdout:   "['1 23  4']\n['1 23  4']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -- "1 2" "3  4"
E           
E           IFS=
E           argv.py $*
E           argv.py "$*"
E           ---
E           
E           ---
E           
E           Test: Leading ' ' vs leading ' _ ' (line 117)
E           
E           stdout mismatch:
E             expected: "['', 'a', 'b']\n['a', 'b']"
E             actual:   "['', 'a', 'b', '']\n['a', 'b', '']"
E           
E           Expected stdout: "['', 'a', 'b']\n['a', 'b']"
E           Actual stdout:   "['', 'a', 'b', '']\n['a', 'b', '']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # This behavior is weird, but all shells agree.
E           IFS='_ '
E           s1='_ a  b _ '
E           s2='  a  b _ '
E           argv.py $s1
E           argv.py $s2
E           ---
E           
E           ---
E           
E           Test: IFS with whitespace and non-whitepace. (line 135)
E           
E           stdout mismatch:
E             expected: "['a', 'b', '', '', 'c', 'd', 'e']"
E             actual:   "['a', 'b', '', '', '', 'c', '', 'd', 'e']"
E           
E           Expected stdout: "['a', 'b', '', '', 'c', 'd', 'e']"
E           Actual stdout:   "['a', 'b', '', '', '', 'c', '', 'd', 'e']\n"
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           # NOTE: Three delimiters means two empty words in the middle.  No elision.
E           IFS='_ '
E           s1='a_b _ _ _ c  _d e'
E           argv.py $s1
E           ---
E           
E           ... and 22 more failures

tests/spec_tests/test_spec.py:249: Failed
_____________________ test_bash_spec_file[xtrace.test.sh] ______________________

test_file = 'xtrace.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 1 passed, 6 failed, 12 skipped
E           ============================================================
E           
E           Test: set -o verbose prints unevaluated code (line 28)
E           
E           stderr mismatch:
E             expected: 'x=foo\ny=bar\necho $x\necho $(echo $y)'
E             actual:   ''
E           
E           Expected stdout: 'foo\nbar'
E           Actual stdout:   'foo\nbar\n'
E           Expected stderr: 'x=foo\ny=bar\necho $x\necho $(echo $y)'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           set -o verbose
E           x=foo
E           y=bar
E           echo $x
E           echo $(echo $y)
E           ---
E           
E           ---
E           
E           Test: xtrace with unprintable chars (line 49)
E           
E           stdout mismatch:
E             expected: "STDOUT\n   a 003   b 004   c  \\n\n  61  03  62  04  63  0a\n\nSTDERR\n+ echo $'a\\003b\\004c'"
E             actual:   'STDOUT\n\nSTDERR'
E           
E           Expected stdout: "STDOUT\n   a 003   b 004   c  \\n\n  61  03  62  04  63  0a\n\nSTDERR\n+ echo $'a\\003b\\004c'"
E           Actual stdout:   'STDOUT\n\nSTDERR\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   1
E           
E           Script:
E           ---
E           case $SH in dash) exit ;; esac
E           
E           $SH >stdout 2>stderr <<'EOF'
E           
E           s=$'a\x03b\004c\x00d'
E           set -o xtrace
E           echo "$s"
E           EOF
E           
E           show_hex() { od -A n -t c -t x1; }
E           
E           echo STDOUT
E           cat stdout | show_hex
E           echo
E           
E           echo STDERR
E           grep 'echo' stderr
E           ---
E           
E           ---
E           
E           Test: xtrace with variables in PS4 (line 297)
E           
E           stderr mismatch:
E             expected: '+:x=1\n+1:echo one\n+1:x=2\n+2:echo two'
E             actual:   ''
E           
E           Expected stdout: 'one\ntwo'
E           Actual stdout:   'one\ntwo\n'
E           Expected stderr: '+:x=1\n+1:echo one\n+1:x=2\n+2:echo two'
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           PS4='+$x:'
E           set -o xtrace
E           x=1
E           echo one
E           x=2
E           echo two
E           ---
E           
E           ... and 3 more failures

tests/spec_tests/test_spec.py:249: Failed
___________________ test_bash_spec_file[zsh-idioms.test.sh] ____________________

test_file = 'zsh-idioms.test.sh'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_bash_test_files())
    async def test_bash_spec_file(test_file: str):
        """Run all tests in a bash spec file."""
        file_path = BASH_CASES_DIR / test_file
        spec_file = parse_spec_file(file_path)
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in spec_file.test_cases:
            result = await run_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\nSummary: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           Summary: 2 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Test: zsh var sub is rejected at runtime (line 27)
E           
E           stdout mismatch:
E             expected: 'status=1\nstatus=1\nstatus=1'
E             actual:   'z z\nstatus=0\n\nstatus=0\n\nstatus=0'
E           
E           Expected stdout: 'status=1\nstatus=1\nstatus=1'
E           Actual stdout:   'z z\nstatus=0\n\nstatus=0\n\nstatus=0\n'
E           Expected stderr: None
E           Actual stderr:   ''
E           Expected status: None
E           Actual status:   0
E           
E           Script:
E           ---
E           eval 'echo z ${(m)foo} z'
E           echo status=$?
E           
E           eval 'echo ${x:-${(m)foo}}'
E           echo status=$?
E           
E           # double quoted
E           eval 'echo "${(m)foo}"'
E           echo status=$?
E           ---

tests/spec_tests/test_spec.py:249: Failed
___________________ test_grep_spec_file[busybox-grep.tests] ____________________

test_file = 'busybox-grep.tests'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_grep_test_files())
    async def test_grep_spec_file(test_file: str):
        """Run all tests in a grep spec file."""
        file_path = GREP_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_grep_test_file(content, str(file_path))
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_grep_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_grep_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           busybox-grep.tests: 29 passed, 22 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '0\n'
E             actual:   'grep: $0: No such file or directory\n1\n'
E           
E           OUTPUT:
E             expected: '0\n'
E             actual:   'grep: $0: No such file or directory\n1\n'
E           
E           COMMAND:
E           grep grep '$0' > /dev/null 2>&1 ; echo $?
E           
E           ---
E           
E           Output mismatch:
E             expected: 'input:two\n'
E             actual:   '/tmp/input:two\n'
E           
E           OUTPUT:
E             expected: 'input:two\n'
E             actual:   '/tmp/input:two\n'
E           
E           COMMAND:
E           grep two input empty 2>/dev/null
E           
E           INFILE:
E           one
E           two
E           three
E           three
E           three
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: '(standard input):two\ninput:two\n'
E             actual:   '-:two\n/tmp/input:two\n'
E           
E           OUTPUT:
E             expected: '(standard input):two\ninput:two\n'
E             actual:   '-:two\n/tmp/input:two\n'
E           
E           COMMAND:
E           grep two - input
E           
E           STDIN:
E           one
E           two
E           too
E           three
E           three
E           
E           
E           INFILE:
E           one
E           two
E           three
E           
E           
E           ... and 19 more failures

tests/spec_tests/test_spec.py:286: Failed
______________________ test_grep_spec_file[gnu-bre.tests] ______________________

test_file = 'gnu-bre.tests'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_grep_test_files())
    async def test_grep_spec_file(test_file: str):
        """Run all tests in a grep spec file."""
        file_path = GREP_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_grep_test_file(content, str(file_path))
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_grep_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_grep_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           gnu-bre.tests: 53 passed, 8 failed, 3 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: 'a^b\n'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'a^b\n'
E             actual:   ''
E           
E           COMMAND:
E           grep  'a^b'
E           
E           STDIN:
E           a^b
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: 'a$b\n'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'a$b\n'
E             actual:   ''
E           
E           COMMAND:
E           grep  'a$b'
E           
E           STDIN:
E           a$b
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: '*\n'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '*\n'
E             actual:   ''
E           STDERR:
E             "grep: invalid pattern '*': nothing to repeat at position 0\n"
E           
E           COMMAND:
E           grep  '*'
E           
E           STDIN:
E           *
E           
E           
E           ... and 5 more failures

tests/spec_tests/test_spec.py:286: Failed
______________________ test_grep_spec_file[gnu-ere.tests] ______________________

test_file = 'gnu-ere.tests'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_grep_test_files())
    async def test_grep_spec_file(test_file: str):
        """Run all tests in a grep spec file."""
        file_path = GREP_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_grep_test_file(content, str(file_path))
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_grep_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_grep_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           gnu-ere.tests: 179 passed, 17 failed, 21 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ''
E             actual:   'test\n'
E           
E           OUTPUT:
E             expected: ''
E             actual:   'test\n'
E           
E           COMMAND:
E           grep -E '(?a)'
E           
E           STDIN:
E           test
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: ''
E             actual:   'test\n'
E           
E           OUTPUT:
E             expected: ''
E             actual:   'test\n'
E           
E           COMMAND:
E           grep -E 'a*+'
E           
E           STDIN:
E           test
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: ''
E             actual:   'test\n'
E           
E           OUTPUT:
E             expected: ''
E             actual:   'test\n'
E           
E           COMMAND:
E           grep -E 'a?+'
E           
E           STDIN:
E           test
E           
E           
E           ... and 14 more failures

tests/spec_tests/test_spec.py:286: Failed
___________________ test_grep_spec_file[gnu-spencer2.tests] ____________________

test_file = 'gnu-spencer2.tests'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_grep_test_files())
    async def test_grep_spec_file(test_file: str):
        """Run all tests in a grep spec file."""
        file_path = GREP_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_grep_test_file(content, str(file_path))
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_grep_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_grep_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           gnu-spencer2.tests: 286 passed, 11 failed, 20 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ''
E             actual:   'BADRPT\n'
E           
E           OUTPUT:
E             expected: ''
E             actual:   'BADRPT\n'
E           
E           COMMAND:
E           grep -E '(?a)'
E           
E           STDIN:
E           BADRPT
E           
E           
E           ---
E           
E           UNEXPECTED PASS: This test was marked skip (ERE a*+ should be BADRPT) but now passes.
E           
E           OUTPUT:
E             expected: 'BADRPT\n'
E             actual:   'BADRPT\n'
E           
E           COMMAND:
E           grep -E 'a*+'
E           
E           STDIN:
E           BADRPT
E           
E           
E           ---
E           
E           UNEXPECTED PASS: This test was marked skip (ERE a?+ should be BADRPT) but now passes.
E           
E           OUTPUT:
E             expected: 'BADRPT\n'
E             actual:   'BADRPT\n'
E           
E           COMMAND:
E           grep -E 'a?+'
E           
E           STDIN:
E           BADRPT
E           
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:286: Failed
____________________ test_sed_spec_file[busybox-sed.tests] _____________________

test_file = 'busybox-sed.tests'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_sed_test_files())
    async def test_sed_spec_file(test_file: str):
        """Run all tests in a sed spec file."""
        file_path = SED_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_sed_test_file(content, str(file_path))
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_sed_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_sed_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           busybox-sed.tests: 42 passed, 54 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: 'hello'
E             actual:   'hello\nhello\n'
E           
E           OUTPUT:
E             expected: 'hello'
E             actual:   'hello\nhello\n'
E           
E           COMMAND:
E           sed "" - -
E           
E           STDIN:
E           hello
E           
E           ---
E           
E           Output mismatch:
E             expected: '1\n2\n3\n'
E             actual:   '\n2\n\n'
E           
E           OUTPUT:
E             expected: '1\n2\n3\n'
E             actual:   '\n2\n\n'
E           
E           COMMAND:
E           sed -e 'i\' -e '1' -e 'a\' -e '3'
E           
E           STDIN:
E           2
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: ',1,2,3,4,5,\n'
E             actual:   '12345\n'
E           
E           OUTPUT:
E             expected: ',1,2,3,4,5,\n'
E             actual:   '12345\n'
E           
E           COMMAND:
E           sed -e 's/[[:space:]]*/,/g'
E           
E           STDIN:
E           12345
E           
E           
E           ... and 51 more failures

tests/spec_tests/test_spec.py:323: Failed
__________________ test_sed_spec_file[pythonsed-chang.suite] ___________________

test_file = 'pythonsed-chang.suite'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_sed_test_files())
    async def test_sed_spec_file(test_file: str):
        """Run all tests in a sed spec file."""
        file_path = SED_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_sed_test_file(content, str(file_path))
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_sed_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_sed_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           pythonsed-chang.suite: 12 passed, 31 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: 'Line -6\nLine -5\nLine -4\nLine -3\nLine -2\nLine -1\n'
E             actual:   'Line -9\nLine -8\nLine -7\nLine -6\nLine -5\nLine -4\nLine -3\nLine -2\nLine -1\n'
E           
E           OUTPUT:
E             expected: 'Line -6\nLine -5\nLine -4\nLine -3\nLine -2\nLine -1\n'
E             actual:   'Line -9\nLine -8\nLine -7\nLine -6\nLine -5\nLine -4\nLine -3\nLine -2\nLine -1\n'
E           
E           COMMAND:
E           sed -e ':loop' -e '$q' -e '/^\([^\n]*\n\)\{5\}/D' -e 'N' -e 'b loop'
E           
E           STDIN:
E           Line -9
E           Line -8
E           Line -7
E           Line -6
E           Line -5
E           Line -4
E           Line -3
E           Line -2
E           Line -1
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: 'Line 1 AAAA\nLine 2 BBBB\nLine 3 CCCC\n'
E             actual:   'Line 1 AAAA\nLine 2 BBBB\nLine 3 CCCC\nLine 4 DDDD\nLine 5 EEEE\nLine 6 FFFF\nLine 7 GGGG\nLine 8 HHHH\nLine 9 IIII\n'
E           
E           OUTPUT:
E             expected: 'Line 1 AAAA\nLine 2 BBBB\nLine 3 CCCC\n'
E             actual:   'Line 1 AAAA\nLine 2 BBBB\nLine 3 CCCC\nLine 4 DDDD\nLine 5 EEEE\nLine 6 FFFF\nLine 7 GGGG\nLine 8 HHHH\nLine 9 IIII\n'
E           
E           COMMAND:
E           sed -e ':loop' -e '1,5{' -e '$d' -e 'N' -e 'b loop' -e '}' -e '$d' -e 'N' -e 'P' -e 'D'
E           
E           STDIN:
E           Line 1 AAAA
E           Line 2 BBBB
E           Line 3 CCCC
E           Line 4 DDDD
E           Line 5 EEEE
E           Line 6 FFFF
E           Line 7 GGGG
E           Line 8 HHHH
E           Line 9 IIII
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: 'Line 10\nLine 9\nLine 8\nLine 2\nLine 1\n'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'Line 10\nLine 9\nLine 8\nLine 2\nLine 1\n'
E             actual:   ''
E           STDERR:
E             'sed: unknown command: #\n'
E           
E           COMMAND:
E           sed -e '#r' -e ':loop' -e '1,6{' -e '$b last' -e 'N' -e 'b loop' -e '}' -e '$!{' -e 'N' -e 'P' -e 'D' -e '}' -e ': last' -e 's/^.*\n([^\n]*(\n[^\n]*){1})$/\1/'
E           
E           STDIN:
E           Line 10
E           Line 9
E           Line 8
E           Line 7
E           Line 6
E           Line 5
E           Line 4
E           Line 3
E           Line 2
E           Line 1
E           
E           
E           ... and 28 more failures

tests/spec_tests/test_spec.py:323: Failed
___________________ test_sed_spec_file[pythonsed-unit.suite] ___________________

test_file = 'pythonsed-unit.suite'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_sed_test_files())
    async def test_sed_spec_file(test_file: str):
        """Run all tests in a sed spec file."""
        file_path = SED_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_sed_test_file(content, str(file_path))
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_sed_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_sed_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           pythonsed-unit.suite: 22 passed, 76 failed, 0 skipped
E           ============================================================
E           
E           Expected error but got success with output: 'a\n'
E           
E           OUTPUT:
E             expected: ''
E             actual:   'a\n'
E           
E           COMMAND:
E           sed '1'
E           
E           STDIN:
E           a
E           
E           
E           ---
E           
E           Expected error but got success with output: 'a\n'
E           
E           OUTPUT:
E             expected: ''
E             actual:   'a\n'
E           
E           COMMAND:
E           sed '1,2'
E           
E           STDIN:
E           a
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: 'foo # no comment inside i argument\nbar ; nor separator inside i argument\negg } nor end of block inside i argument\nfoo # no comment inside c argument\nfoo # no comment inside a argument\nbar ; nor separator inside a argument\negg } nor end of block inside a argument\n'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'foo # no comment inside i argument\nbar ; nor separator inside i argument\negg } nor end of block inside i argument\nfoo # no comment inside c argument\nfoo # no comment inside a argument\nbar ; nor separator inside a argument\negg } nor end of block inside a argument\n'
E             actual:   ''
E           STDERR:
E             'sed: unknown command: #\n'
E           
E           COMMAND:
E           sed -e '#n' -e 'i\' -e 'foo # no comment inside i argument' -e 'i\' -e 'bar ; nor separator inside i argument' -e 'i\' -e 'egg } nor end of block inside i argument' -e 'a\' -e 'foo # no comment inside a argument' -e 'a\' -e 'bar ; nor separator inside a argument' -e 'a\' -e 'egg } nor end of block inside a argument' -e 'c\' -e 'foo # no comment inside c argument' -e 'c\' -e 'bar ; nor separator inside c argument' -e 'c\' -e 'egg } nor end of block inside c argument'
E           
E           STDIN:
E           x
E           
E           
E           ... and 73 more failures

tests/spec_tests/test_spec.py:323: Failed
__________________________ test_awk_spec_file[T.-f-f] __________________________

test_file = 'T.-f-f'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.-f-f: 0 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '\n\n\n\n]'
E             actual:   '/a/\n/a/'
E           
E           OUTPUT:
E             expected: '\n\n\n\n]'
E             actual:   '/a/\n/a/\n'
E           
E           PROGRAM:
E           /a/' /etc/passwd >foo2
E           diff foo1 foo2 || echo 'BAD: T.-f-f  -f -'
E           
E           
E           cp /etc/passwd foo1
E           echo '/./ {
E           
E           INPUT:
E           /a/

tests/spec_tests/test_spec.py:364: Failed
__________________________ test_awk_spec_file[T.argv] __________________________

test_file = 'T.argv'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.argv: 1 passed, 9 failed, 2 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '5\n$awk\na\nbc\ndef'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '5\n$awk\na\nbc\ndef\n'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           BEGIN {
E           	print ARGC
E           	ARGV[ARGC-1] = ""
E           	for (i=0; i < ARGC; i++)
E           		print ARGV[i]
E           	exit
E           }
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: 'foo1\nfoo2\nfoo3'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'foo1\nfoo2\nfoo3'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           {print L $0}
E           
E           ---
E           
E           Output mismatch:
E             expected: 'foo1\nfoo2\nfoo3'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'foo1\nfoo2\nfoo3'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           {print L $0}
E           
E           ... and 6 more failures

tests/spec_tests/test_spec.py:364: Failed
________________________ test_awk_spec_file[T.builtin] _________________________

test_file = 'T.builtin'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.builtin: 3 passed, 2 failed, 1 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '2'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '2'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN { print index(123, substr(123, 2)) }
E           
E           ---
E           
E           Output mismatch:
E             expected: '3.14159 0.000 0.000 3.14159 10.000'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '3.14159 0.000 0.000 3.14159 10.000'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN {
E           	pi = 2 * atan2(1, 0)
E           	printf("%.5f %.3f %.3f %.5f %.3f\n",
E           		pi, sin(pi), cos(pi/2), exp(log(pi)), log(exp(10)))
E           }

tests/spec_tests/test_spec.py:364: Failed
_________________________ test_awk_spec_file[T.close] __________________________

test_file = 'T.close'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.close: 1 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '0'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '0'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           	# non-accessible file
E             BEGIN { getline <"/etc/passwd"; print close("/etc/passwd"); }
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: '-1'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '-1'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           	# file not opened
E             BEGIN { print close("glotch"); }
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: '0'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '0'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           	# normal close
E             BEGIN { print "hello" > "foo"; print close("foo"); }
E           
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:364: Failed
__________________________ test_awk_spec_file[T.clv] ___________________________

test_file = 'T.clv'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.clv: 3 passed, 10 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '0\n0 hello'
E             actual:   '0\n0'
E           
E           OUTPUT:
E             expected: '0\n0 hello'
E             actual:   '0\n0 \n'
E           
E           PROGRAM:
E           
E           BEGIN { x=0; print x; getline; print x, $0 }
E           
E           
E           INPUT:
E           hello
E           goodbye
E           
E           ---
E           
E           Output mismatch:
E             expected: ''
E             actual:   '0\n0\nhello\nhello\ngoodbye\ngoodbye'
E           
E           OUTPUT:
E             expected: ''
E             actual:   '0\n0 \nhello\nhello\ngoodbye\ngoodbye\n'
E           
E           PROGRAM:
E           
E           BEGIN { x=0; print x; getline; print x, $0 }
E           ' x=1 >foo1
E           echo '0
E           1 hello
E           
E           INPUT:
E           hello
E           goodbye
E           
E           ---
E           
E           Output mismatch:
E             expected: ''
E             actual:   '0\n0\nhello\nhello\ngoodbye\ngoodbye'
E           
E           OUTPUT:
E             expected: ''
E             actual:   '0\n0 \nhello\nhello\ngoodbye\ngoodbye\n'
E           
E           PROGRAM:
E           
E           BEGIN { x=0; print x; getline; print x, $0 }
E           ' x=1 x=2 x=3 >foo1
E           echo '0
E           3 hello
E           
E           INPUT:
E           hello
E           goodbye
E           
E           ... and 7 more failures

tests/spec_tests/test_spec.py:364: Failed
________________________ test_awk_spec_file[T.csconcat] ________________________

test_file = 'T.csconcat'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.csconcat: 0 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: 'abcdef aaa\nhelloworld\n\n hello\nhello\nhello world\nhello world'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'abcdef aaa\nhelloworld\n\n hello\nhello \nhello world\nhello world'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           BEGIN {
E           	$0 = "aaa"
E           	print "abcdef" " " $0
E           }
E           BEGIN { print "hello" "world"; print helloworld }
E           BEGIN {
E            	print " " "hello"
E            	print "hello" " "
E            	print "hello" " " "world"
E            	print "hello" (" " "world")
E           }

tests/spec_tests/test_spec.py:364: Failed
________________________ test_awk_spec_file[T.exprconv] ________________________

test_file = 'T.exprconv'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.exprconv: 0 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '1\n0\n1\n0\n1\n1'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '1\n0\n1\n0\n1\n1'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           BEGIN {	x = (1 > 0); print x
E           	x = (1 < 0); print x
E           	x = (1 == 1); print x
E           	print ("a" >= "b")
E           	print ("b" >= "a")
E           	print (0 == 0.0)
E           	# x = ((1 == 1e0) && (1 == 10e-1) && (1 == .1e2)); print x
E           	exit
E           }

tests/spec_tests/test_spec.py:364: Failed
__________________________ test_awk_spec_file[T.func] __________________________

test_file = 'T.func'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.func: 1 passed, 8 failed, 3 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '1\n0\n0\n0\n1'
E             actual:   '0\n0\n0\n0\n0'
E           
E           OUTPUT:
E             expected: '1\n0\n0\n0\n1'
E             actual:   '0\n0\n0\n0\n0\n'
E           
E           PROGRAM:
E           
E           # tests whether function returns sensible type bits
E           
E           function assert(cond) { # assertion
E               if (cond) print 1; else print 0
E           }
E           
E           function i(x) { return x }
E           
E           { m=$1; n=i($2); assert(m>n) }
E           
E           
E           INPUT:
E           10 2
E           2 10
E           10 10
E           10 1e1
E           1e1 9
E           
E           ---
E           
E           Output mismatch:
E             expected: 'data: data'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'data: data'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           function test1(array) { array["test"] = "data" }
E           function test2(array) { return(array["test"]) }
E           BEGIN { test1(foo); print "data: " test2(foo) }
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: 'x'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'x'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           BEGIN	{ code() }
E           END	{ codeout("x") }
E           function code() { ; }
E           function codeout(ex) { print ex }
E           
E           
E           ... and 5 more failures

tests/spec_tests/test_spec.py:364: Failed
__________________________ test_awk_spec_file[T.gawk] __________________________

test_file = 'T.gawk'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.gawk: 0 passed, 11 failed, 15 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '1\n1'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '1\n1'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           	BEGIN { # foo[10] = 0		# put this line in and it will work
E           		test(foo); print foo[1]
E           		test2(foo2); print foo2[1]
E           	}
E           	function test(foo) { test2(foo) }
E           	function test2(bar) { bar[1] = 1 }
E           
E           
E           ---
E           
E           Output mismatch:
E             expected: 'hello               world'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'hello               world'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN { printf "%*sworld\n", -20, "hello" }
E           
E           ---
E           
E           Output mismatch:
E             expected: '0 1'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '0 1'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN {
E           	bool = ((b = 1) in c);
E           	print bool, b	# gawk-3.0.1 prints "0 "; should print "0 1"
E           }
E           
E           ... and 8 more failures

tests/spec_tests/test_spec.py:364: Failed
________________________ test_awk_spec_file[T.getline] _________________________

test_file = 'T.getline'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.getline: 1 passed, 2 failed, 3 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: 'false false equal'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'false false equal'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN {
E           	"echo 0" | getline
E           	if ($0) printf "true " 
E           	else printf "false "
E           	if ($1) printf "true " 
E           	else printf "false "
E           	if ($0==$1) printf "equal\n"
E           	else printf "not equal\n"
E           }
E           
E           ---
E           
E           Output mismatch:
E             expected: 'new stuff'
E             actual:   'new'
E           
E           OUTPUT:
E             expected: 'new stuff'
E             actual:   'new\n'
E           
E           PROGRAM:
E           BEGIN { $0="old stuff"; $1="new"; getline x; print}
E           
E           INPUT:
E           L1
E           L2

tests/spec_tests/test_spec.py:364: Failed
_________________________ test_awk_spec_file[T.latin1] _________________________

test_file = 'T.latin1'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
>       content = file_path.read_text()
                  ^^^^^^^^^^^^^^^^^^^^^

tests/spec_tests/test_spec.py:336: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
../../../.asdf/installs/python/3.12.4/lib/python3.12/pathlib.py:1028: in read_text
    return f.read()
           ^^^^^^^^
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <encodings.utf_8.IncrementalDecoder object at 0x10c738260>
input = b'echo T.latin1: tests of 8-bit input\n\nawk=${awk-../a.out}\n\n$awk \'\n{ print $0 }\n\' latin1 >foo1\n\ndiff latin1 ...r a file containing just \xe1.\' >foo1\n$awk \'/[\xe1\xe9]/\' foo0 >foo2\ndiff foo1 foo2 || echo \'BAD: T.latin1 7\'\n'
final = True

>   ???
E   UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 204: invalid continuation byte

<frozen codecs>:322: UnicodeDecodeError
__________________________ test_awk_spec_file[T.misc] __________________________

test_file = 'T.misc'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.misc: 15 passed, 19 failed, 17 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '12345678901'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '12345678901'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           length($0) > 10
E           
E           ---
E           
E           Output mismatch:
E             expected: '012x45'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '012x45'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN { print "0\061\62x\0645" }
E           
E           ---
E           
E           Output mismatch:
E             expected: '1\n1'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '1\n1'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN {x = 1; print x; x = x; print x}
E           
E           ... and 16 more failures

tests/spec_tests/test_spec.py:364: Failed
________________________ test_awk_spec_file[T.overflow] ________________________

test_file = 'T.overflow'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.overflow: 3 passed, 4 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '500\n500'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '500\n500'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           BEGIN {
E           	for (j = 0; j < 2; j++) {
E           		for (i = 0; i < 500; i++)
E           			printf(" 123456789")
E           		printf("\n");
E           	}
E           } 
E           
E           ---
E           
E           Output mismatch:
E             expected: '4000004'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '4000004'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN {
E           	for (i = 1; i < 1000; i++) s = s "a-z"
E           	if ("x" ~ "[" s "]")
E           		print "ugh"
E           }
E           
E           ---
E           
E           Output mismatch:
E             expected: '0'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '0'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           BEGIN {
E           	for (i = 0; i < 100000; i++)
E           		x[i] = i
E           	for (i in x)
E           		delete x[i]
E           	n = 0
E           	for (i in x)
E           		n++
E           	print n
E           }
E           
E           ... and 1 more failures

tests/spec_tests/test_spec.py:364: Failed
________________________ test_awk_spec_file[T.recache] _________________________

test_file = 'T.recache'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.recache: 0 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: 'b'
E             actual:   ''
E           
E           OUTPUT:
E             expected: 'b'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           
E           BEGIN {
E                   #
E                   # Fill up DFA cache with run-time REs that have all been
E                   # used twice.
E                   #
E                   CACHE_SIZE=64
E                   for(i = 0; i < CACHE_SIZE; i++) {
E                           for(j = 0; j < 2; j++) {
E                                   "" ~ i "";
E                           }
E                   }
E                   #
E                   # Now evalutate an expression that uses two run-time REs
E                   # that have never been used before.  The second RE will
E                   # push the first out of the cache while the first RE is 
E                   # still needed.
E                   #
E                   x = "a"
E                   reg1 = "[Aa]"
E                   reg2 = "A"
E                   sub(reg1, x ~ reg2 ? "B" : "b", x)
E           
E                   print x
E           }

tests/spec_tests/test_spec.py:364: Failed
_________________________ test_awk_spec_file[T.redir] __________________________

test_file = 'T.redir'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.redir: 2 passed, 2 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '   head\n1\n2'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '   head\n1\n2'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           { print >"foo" }
E           
E           ---
E           
E           Output mismatch:
E             expected: '   head\n1\n2'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '   head\n1\n2'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN { print "   head"
E           	for (i = 1; i < 3; i++)
E           		print i | "sort" }

tests/spec_tests/test_spec.py:364: Failed
_________________________ test_awk_spec_file[T.split] __________________________

test_file = 'T.split'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.split: 1 passed, 10 failed, 3 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '- a 3\n3 1 4'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '- a 3\n3 1 4'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN {
E           	# Assign string to $0, then change FS.
E           	FS = ":"
E           	$0="a:bc:def"
E           	FS = "-"
E           	print FS, $1, NF
E           
E           	# Assign number to $0, then change FS.
E           	FS = "2"
E           	$0=1212121
E           	FS="3"
E           	print FS, $1, NF
E           }
E           
E           ---
E           
E           Output mismatch:
E             expected: '- a 3'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '- a 3'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN {
E           	# FS changes after getline.
E           	FS = ":"
E           	"echo a:bc:def" | getline
E           	FS = "-"
E           	print FS, $1, NF
E           }
E           
E           ---
E           
E           Output mismatch:
E             expected: '0\n1\n2\n3\n4'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '0\n1\n2\n3\n4'
E             actual:   ''
E           
E           PROGRAM:
E           BEGIN {
E           	FS = ":"
E           	while (getline <"
E           
E           INPUT:
E           
E           a
E           a:b
E           c:d:e
E           e:f:g:h
E           
E           ... and 7 more failures

tests/spec_tests/test_spec.py:364: Failed
_________________________ test_awk_spec_file[T.system] _________________________

test_file = 'T.system'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_awk_test_files())
    async def test_awk_spec_file(test_file: str):
        """Run all tests in an awk spec file."""
        file_path = AWK_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_awk_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_awk_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_awk_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           T.system: 0 passed, 1 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: '3\n4'
E             actual:   ''
E           
E           OUTPUT:
E             expected: '3\n4'
E             actual:   ''
E           STDERR:
E             'bash: /dev/null: No such file or directory\n'
E           
E           PROGRAM:
E           BEGIN {
E           	n = system("exit 3")
E           	print n
E           	exit n+1
E           }

tests/spec_tests/test_spec.py:364: Failed
________________________ test_jq_spec_file[base64.test] ________________________

test_file = 'base64.test'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_jq_test_files())
    async def test_jq_spec_file(test_file: str):
        """Run all tests in a jq spec file."""
        file_path = JQ_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_jq_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_jq_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_jq_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           base64.test: 7 passed, 3 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ['"qixbaz\\n"']
E             actual:   ['null']
E           
E           OUTPUT:
E             expected: ['"qixbaz\\n"']
E             actual:   ['null']
E           
E           PROGRAM:
E           @base64d
E           
E           INPUT:
E           "cWl4YmF6Cg"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['"string (\\"Not base64...) is not valid base64 data"']
E             actual:   ['null']
E           
E           OUTPUT:
E             expected: ['"string (\\"Not base64...) is not valid base64 data"']
E             actual:   ['null']
E           
E           PROGRAM:
E           . | try @base64d catch .
E           
E           INPUT:
E           "Not base64 data"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['"string (\\"QUJDa\\") trailing base64 byte found"']
E             actual:   ['null']
E           
E           OUTPUT:
E             expected: ['"string (\\"QUJDa\\") trailing base64 byte found"']
E             actual:   ['null']
E           
E           PROGRAM:
E           . | try @base64d catch .
E           
E           INPUT:
E           "QUJDa"

tests/spec_tests/test_spec.py:405: Failed
__________________________ test_jq_spec_file[jq.test] __________________________

test_file = 'jq.test'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_jq_test_files())
    async def test_jq_spec_file(test_file: str):
        """Run all tests in a jq spec file."""
        file_path = JQ_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_jq_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_jq_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_jq_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           jq.test: 232 passed, 289 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ['-1']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['-1']
E             actual:   []
E           STDERR:
E             'jq: Unknown option: -1\n'
E           
E           PROGRAM:
E           -1
E           
E           INPUT:
E           null
E           
E           ---
E           
E           Output mismatch:
E             expected: ['"byte order mark"']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['"byte order mark"']
E             actual:   []
E           STDERR:
E             'jq: parse error: Expecting value: line 1 column 1 (char 0)\n'
E           
E           PROGRAM:
E           .
E           
E           INPUT:
E           ﻿"byte order mark"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['"Aa\\u000d\\u000a\\u0009\\u0008\\u000c\\u03bc"']
E             actual:   ['"Aa\\r\\n\\tbfu03bc"']
E           
E           OUTPUT:
E             expected: ['"Aa\\u000d\\u000a\\u0009\\u0008\\u000c\\u03bc"']
E             actual:   ['"Aa\\r\\n\\tbfu03bc"']
E           
E           PROGRAM:
E           "Aa\r\n\t\b\f\u03bc"
E           
E           INPUT:
E           null
E           
E           ... and 286 more failures

tests/spec_tests/test_spec.py:405: Failed
_________________________ test_jq_spec_file[man.test] __________________________

test_file = 'man.test'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_jq_test_files())
    async def test_jq_spec_file(test_file: str):
        """Run all tests in a jq spec file."""
        file_path = JQ_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_jq_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_jq_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_jq_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           man.test: 146 passed, 84 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ['true']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['true']
E             actual:   []
E           STDERR:
E             'jq: error: Unknown function: have_decnum\n'
E           
E           PROGRAM:
E           [., tojson] == if have_decnum then [12345678909876543212345,"12345678909876543212345"] else [12345678909876543000000,"12345678909876543000000"] end
E           
E           INPUT:
E           12345678909876543212345
E           
E           ---
E           
E           Output mismatch:
E             expected: ['true']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['true']
E             actual:   []
E           STDERR:
E             'jq: error: Unknown function: have_decnum\n'
E           
E           PROGRAM:
E           [1234567890987654321,-1234567890987654321 | tojson] == if have_decnum then ["1234567890987654321","-1234567890987654321"] else ["1234567890987654400","-1234567890987654400"] end
E           
E           INPUT:
E           null
E           
E           ---
E           
E           Output mismatch:
E             expected: ['true']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['true']
E             actual:   []
E           STDERR:
E             'jq: error: Unknown function: have_decnum\n'
E           
E           PROGRAM:
E           map([., . == 1]) | tojson == if have_decnum then "[[1,true],[1.000,true],[1.0,true],[1.00,true]]" else "[[1,true],[1,true],[1,true],[1,true]]" end
E           
E           INPUT:
E           [1, 1.000, 1.0, 100e-2]
E           
E           ... and 81 more failures

tests/spec_tests/test_spec.py:405: Failed
_______________________ test_jq_spec_file[manonig.test] ________________________

test_file = 'manonig.test'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_jq_test_files())
    async def test_jq_spec_file(test_file: str):
        """Run all tests in a jq spec file."""
        file_path = JQ_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_jq_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_jq_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_jq_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           manonig.test: 5 passed, 14 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ['{"offset": 0, "length": 3, "string": "abc", "captures": [{"offset": 0, "length": 3, "string": "abc", "name": null}]}', '{"offset": 4, "length": 3, "string": "abc", "captures": [{"offset": 4, "length": 3, "string": "abc", "name": null}]}']
E             actual:   ['{"offset":0,"length":3,"string":"abc","captures":[{"offset":0,"length":3,"string":"abc","name":null}]}']
E           
E           OUTPUT:
E             expected: ['{"offset": 0, "length": 3, "string": "abc", "captures": [{"offset": 0, "length": 3, "string": "abc", "name": null}]}', '{"offset": 4, "length": 3, "string": "abc", "captures": [{"offset": 4, "length": 3, "string": "abc", "name": null}]}']
E             actual:   ['{"offset":0,"length":3,"string":"abc","captures":[{"offset":0,"length":3,"string":"abc","name":null}]}']
E           
E           PROGRAM:
E           match("(abc)+"; "g")
E           
E           INPUT:
E           "abc abc"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['{"offset": 0, "length": 3, "string": "foo", "captures": []}', '{"offset": 8, "length": 3, "string": "FOO", "captures": []}']
E             actual:   ['{"offset":0,"length":1,"string":"f","captures":[]}']
E           
E           OUTPUT:
E             expected: ['{"offset": 0, "length": 3, "string": "foo", "captures": []}', '{"offset": 8, "length": 3, "string": "FOO", "captures": []}']
E             actual:   ['{"offset":0,"length":1,"string":"f","captures":[]}']
E           
E           PROGRAM:
E           match(["foo", "ig"])
E           
E           INPUT:
E           "foo bar FOO"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['{"offset": 0, "length": 11, "string": "foo bar foo", "captures": [{"offset": 4, "length": 3, "string": "bar", "name": "bar123"}]}', '{"offset": 12, "length": 8, "string": "foo  foo", "captures": [{"offset": -1, "length": 0, "string": null, "name": "bar123"}]}']
E             actual:   ['null']
E           
E           OUTPUT:
E             expected: ['{"offset": 0, "length": 11, "string": "foo bar foo", "captures": [{"offset": 4, "length": 3, "string": "bar", "name": "bar123"}]}', '{"offset": 12, "length": 8, "string": "foo  foo", "captures": [{"offset": -1, "length": 0, "string": null, "name": "bar123"}]}']
E             actual:   ['null']
E           
E           PROGRAM:
E           match("foo (?<bar123>bar)? foo"; "ig")
E           
E           INPUT:
E           "foo bar foo foo  foo"
E           
E           ... and 11 more failures

tests/spec_tests/test_spec.py:405: Failed
_________________________ test_jq_spec_file[onig.test] _________________________

test_file = 'onig.test'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_jq_test_files())
    async def test_jq_spec_file(test_file: str):
        """Run all tests in a jq spec file."""
        file_path = JQ_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_jq_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_jq_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_jq_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           onig.test: 16 passed, 31 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ['[{"offset":0,"length":0,"string":"","captures":[{"offset":-1,"string":null,"length":0,"name":null}]},{"offset":1,"length":0,"string":"","captures":[{"offset":-1,"string":null,"length":0,"name":null}]},{"offset":2,"length":0,"string":"","captures":[{"offset":-1,"string":null,"length":0,"name":null}]},{"offset":3,"length":0,"string":"","captures":[{"offset":-1,"string":null,"length":0,"name":null}]}]']
E             actual:   ['[{"offset":0,"length":0,"string":"","captures":[]}]']
E           
E           OUTPUT:
E             expected: ['[{"offset":0,"length":0,"string":"","captures":[{"offset":-1,"string":null,"length":0,"name":null}]},{"offset":1,"length":0,"string":"","captures":[{"offset":-1,"string":null,"length":0,"name":null}]},{"offset":2,"length":0,"string":"","captures":[{"offset":-1,"string":null,"length":0,"name":null}]},{"offset":3,"length":0,"string":"","captures":[{"offset":-1,"string":null,"length":0,"name":null}]}]']
E             actual:   ['[{"offset":0,"length":0,"string":"","captures":[]}]']
E           
E           PROGRAM:
E           [match("( )*"; "g")]
E           
E           INPUT:
E           "abc"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['[]']
E             actual:   ['[{"offset":0,"length":0,"string":"","captures":[]}]']
E           
E           OUTPUT:
E             expected: ['[]']
E             actual:   ['[{"offset":0,"length":0,"string":"","captures":[]}]']
E           
E           PROGRAM:
E           [match("( )*"; "gn")]
E           
E           INPUT:
E           "abc"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['[{"offset":0,"length":0,"string":"","captures":[]},{"offset":1,"length":0,"string":"","captures":[]},{"offset":2,"length":0,"string":"","captures":[]}]']
E             actual:   ['[{"offset":0,"length":0,"string":"","captures":[]}]']
E           
E           OUTPUT:
E             expected: ['[{"offset":0,"length":0,"string":"","captures":[]},{"offset":1,"length":0,"string":"","captures":[]},{"offset":2,"length":0,"string":"","captures":[]}]']
E             actual:   ['[{"offset":0,"length":0,"string":"","captures":[]}]']
E           
E           PROGRAM:
E           [match(""; "g")]
E           
E           INPUT:
E           "ab"
E           
E           ... and 28 more failures

tests/spec_tests/test_spec.py:405: Failed
_______________________ test_jq_spec_file[optional.test] _______________________

test_file = 'optional.test'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_jq_test_files())
    async def test_jq_spec_file(test_file: str):
        """Run all tests in a jq spec file."""
        file_path = JQ_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_jq_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_jq_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_jq_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           optional.test: 0 passed, 2 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ['2147483648']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['2147483648']
E             actual:   []
E           STDERR:
E             'jq: error: Unknown function: fromdate\n'
E           
E           PROGRAM:
E           fromdate
E           
E           INPUT:
E           "2038-01-19T03:14:08Z"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['"Tuesday, June 30, 2015"']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['"Tuesday, June 30, 2015"']
E             actual:   []
E           STDERR:
E             'jq: error: Unknown function: strftime\n'
E           
E           PROGRAM:
E           strftime("%A, %B %e, %Y")
E           
E           INPUT:
E           1435677542.822351

tests/spec_tests/test_spec.py:405: Failed
_________________________ test_jq_spec_file[uri.test] __________________________

test_file = 'uri.test'

    @pytest.mark.asyncio
    @pytest.mark.parametrize("test_file", get_jq_test_files())
    async def test_jq_spec_file(test_file: str):
        """Run all tests in a jq spec file."""
        file_path = JQ_CASES_DIR / test_file
        content = file_path.read_text()
        parsed = parse_jq_test_file(content, str(file_path))
    
        # Skip files with no parseable tests
        if not parsed.test_cases:
            pytest.skip("No parseable tests")
    
        passed = 0
        failed = 0
        skipped = 0
        failures = []
    
        for test_case in parsed.test_cases:
            result = await run_jq_test_case(test_case)
    
            if result.skipped:
                skipped += 1
            elif result.passed:
                passed += 1
            else:
                failed += 1
                failures.append(format_jq_error(result))
    
        if failures:
            summary = f"\n\n{'='*60}\n{test_file}: {passed} passed, {failed} failed, {skipped} skipped\n{'='*60}\n\n"
            failure_text = "\n\n---\n\n".join(failures[:3])
            if len(failures) > 3:
                failure_text += f"\n\n... and {len(failures) - 3} more failures"
>           pytest.fail(summary + failure_text)
E           Failed: 
E           
E           ============================================================
E           uri.test: 2 passed, 5 failed, 0 skipped
E           ============================================================
E           
E           Output mismatch:
E             expected: ['"<>&\'\\"\\t"']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['"<>&\'\\"\\t"']
E             actual:   []
E           STDERR:
E             'jq: error: Unknown function: @urid\n'
E           
E           PROGRAM:
E           (@uri|@urid)
E           
E           INPUT:
E           "<>&'\"\t"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['"a \\u03bc \\u2230 \\ud83d\\ude0e"']
E             actual:   []
E           
E           OUTPUT:
E             expected: ['"a \\u03bc \\u2230 \\ud83d\\ude0e"']
E             actual:   []
E           STDERR:
E             'jq: error: Unknown function: @urid\n'
E           
E           PROGRAM:
E           @urid
E           
E           INPUT:
E           "a%20%CE%BC%20%E2%88%B0%20%F0%9F%98%8E"
E           
E           ---
E           
E           Output mismatch:
E             expected: ['"string (\\"%F0%93%81\\") is not a valid uri encoding"']
E             actual:   ['"%F0%93%81"']
E           
E           OUTPUT:
E             expected: ['"string (\\"%F0%93%81\\") is not a valid uri encoding"']
E             actual:   ['"%F0%93%81"']
E           
E           PROGRAM:
E           . | try @urid catch .
E           
E           INPUT:
E           "%F0%93%81"
E           
E           ... and 2 more failures

tests/spec_tests/test_spec.py:405: Failed
=============================== warnings summary ===============================
tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Multiple right brackets inside expression[L12]]
tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Multiple right brackets inside expression[L12]]
tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Multiple right brackets inside expression[L12]]
tests/spec_tests/test_spec.py::test_bash_spec_file[arith-context.test.sh]
tests/spec_tests/test_spec.py::test_bash_spec_file[arith-context.test.sh]
tests/spec_tests/test_spec.py::test_bash_spec_file[arith-context.test.sh]
  <string>:1: SyntaxWarning: 'int' object is not subscriptable; perhaps you missed a comma?

tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::PatSub of unescaped [[] and []][L211]]
tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::single quotes work inside character classes[L269]]
  /Users/dbreunig/Development/cmpnd/just-bash-py/src/just_bash/interpreter/expansion.py:933: FutureWarning: Possible nested set at position 1
    return re.sub(regex_pattern, replacement, value)

tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globstar.test.sh::within braces, globstar works when there is a comma[L51]]
tests/spec_tests/test_spec.py::test_sed_spec_file[busybox-sed.tests]
  /Users/dbreunig/Development/cmpnd/just-bash-py/src/just_bash/commands/sed/sed.py:421: FutureWarning: Possible nested set at position 1
    compiled = re.compile(pattern, regex_flags)

tests/spec_tests/test_spec.py::test_grep_spec_file[busybox-grep.tests]
tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-ere.tests]
tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-spencer2.tests]
  /Users/dbreunig/Development/cmpnd/just-bash-py/src/just_bash/commands/grep/grep.py:272: FutureWarning: Possible nested set at position 2
    regex = re.compile(pattern, flags)

tests/spec_tests/test_spec.py::test_grep_spec_file[busybox-grep.tests]
  /Users/dbreunig/Development/cmpnd/just-bash-py/src/just_bash/commands/grep/grep.py:272: FutureWarning: Possible nested set at position 25
    regex = re.compile(pattern, flags)

tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-ere.tests]
tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-spencer2.tests]
  /Users/dbreunig/Development/cmpnd/just-bash-py/src/just_bash/commands/grep/grep.py:272: FutureWarning: Possible nested set at position 1
    regex = re.compile(pattern, flags)

tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-ere.tests]
  /Users/dbreunig/Development/cmpnd/just-bash-py/src/just_bash/commands/grep/grep.py:272: FutureWarning: Possible nested set at position 11
    regex = re.compile(pattern, flags)

tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-ere.tests]
  /Users/dbreunig/Development/cmpnd/just-bash-py/src/just_bash/commands/grep/grep.py:272: FutureWarning: Possible nested set at position 12
    regex = re.compile(pattern, flags)

-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
=========================== short test summary info ============================
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Usage of builtins[L10]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::define and use alias on a single line[L40]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::defining multiple aliases, then unalias[L56]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::alias not defined[L77]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::alias with trailing space causes alias expansion on second word[L161]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Recursive alias expansion of SECOND word[L188]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Expansion of alias with variable[L199]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Alias must be an unquoted word, no expansions allowed[L209]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::first and second word are the same alias, but no trailing space[L221]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::first and second word are the same alias, with trailing space[L230]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Invalid syntax of alias[L240]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Syntax error after expansion[L263]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Loop split across alias and arg works[L270]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Loop split across alias in another way[L280]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Loop split across both iterative and recursive aliases[L293]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Alias with a quote in the middle is a syntax error[L313]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Alias with internal newlines[L322]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Alias trailing newline[L335]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Two aliases in pipeline[L357]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Alias not respected inside $()[L366]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Alias can be defined and used on a single line[L373]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::alias with line continuation in the middle[L418]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::alias for left brace[L431]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::alias for left paren[L443]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::alias used in subshell and command sub[L455]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::here doc inside alias[L481]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Corner case: alias inside LHS array arithmetic expression[L494]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Alias that is pipeline[L508]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[alias.test.sh::Alias that is && || ;[L516]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::error: s+=(my array)[L45]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::error: myarray+=s[L54]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::typeset s+=(my array)[L68]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::error: typeset myarray+=s[L87]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::error: append used like env prefix[L105]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::myarray[-1]+=s - Append to last element[L125]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::typeset s+=[L164]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::typeset s${dyn}+=[L186]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::export readonly +=[L210]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::local +=[L234]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[append.test.sh::assign builtin appending array: declare d+=(d e)[L253]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arg-parse.test.sh::shift 1 extra[L13]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arg-parse.test.sh::continue 1 extra, break, etc.[L29]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Multiple right brackets inside expression[L12]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Slicing of string with variables[L31]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Array index on LHS of assignment[L38]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Array index on LHS with indices[L46]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Slicing of string with expressions[L53]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Ambiguous colon in slice[L65]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Triple parens should be disambiguated[L78]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Quadruple parens should be disambiguated[L87]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Empty expression (( ))  $(( ))[L133]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Empty expression in ${a[@]: : }[L162]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-context.test.sh::Empty expression a[][L205]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-dynamic.test.sh::Double quotes[L6]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-dynamic.test.sh::Single quotes[L26]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-dynamic.test.sh::Substitutions[L51]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith-dynamic.test.sh::Variable references[L75]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Side Effect in Array Indexing[L17]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Arith sub with word parts[L48]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Backticks within arith sub[L66]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Integer constant validation[L137]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Increment undefined variables with nounset[L235]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::No floating point[L334]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Dynamic base constants[L370]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Dynamic octal constant[L382]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Dynamic hex constants[L388]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Dynamic var names - result of runtime parse/eval[L397]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Recursive name evaluation is a result of runtime parse/eval[L403]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::nounset with arithmetic[L413]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Invalid LValue: two sets of brackets[L536]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Comment not allowed in the middle of multiline arithmetic[L587]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Double subscript[L631]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::assignment with dynamic var name[L703]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::array assignment with dynamic array name[L712]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::unary assignment with dynamic var name[L727]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::unary array assignment with dynamic var name[L739]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Dynamic parsing of arithmetic[L755]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::1 ? a=1 : b=2 ( bug fix)[L791]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Invalid constant[L805]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::Negative numbers with bit shift[L888]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::undef[0] with nounset[L961]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::s[0] with string 42[L1005]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[arith.test.sh::s[0] with string '12 34'[L1024]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::Indexed LHS without spaces, and +=[L6]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::Indexed LHS with spaces[L31]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::Nested a[i[0]]=0[L46]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::Multiple LHS array words[L63]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::LHS array is protected with shopt -s eval_unsafe_arith, e.g. 'a[$(echo 2)]'[L121]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::file named a[ is  not executed[L161]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::More fragments like a[  a[5  a[5 +  a[5 + 3][L189]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::Are quotes allowed?[L267]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::Tricky parsing - a[ a[0]=1 ]=X  a[ a[0]+=1 ]+=X[L308]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::argv.py a[1 + 2]=[L335]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assign.test.sh::declare builtin doesn't allow spaces[L362]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::Literal syntax ([x]=y)[L19]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::Can initialize assoc array with the "(key value ...)" sequence[L50]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::retrieve keys with ![L84]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::coerce to string with ${A[*]}, etc.[L114]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::${A[@]/b/B}[L130]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::${A[@]#prefix}[L145]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::lookup with ${a[0]} -- "0" is a string[L185]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::lookup with double quoted strings "mykey"[L195]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::lookup with unquoted $key and quoted "$i$i"[L213]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::lookup by unquoted string doesn't work in OSH because it's a variable[L227]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::bash bug: "i+1" and i+1 are the same key[L238]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::Indexed array as key of associative array coerces to string (without shopt -s strict_array)[L282]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::Append to associative array value A['x']+='suffix'[L299]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::Slice of associative array doesn't make sense in bash[L309]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::setting key to itself (from bash-bug mailing list)[L377]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::readonly associative array can't be modified[L392]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::associative array and brace expansion[L405]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::unset -v and assoc array[L436]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::nameref and assoc array[L476]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::${!ref} and assoc array[L510]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::printf -v and assoc array[L531]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::bash bug: (( A["$key"] = 1 )) doesn't work[L557]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::Implicit increment of keys[L581]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::test -v with dynamic parsing[L633]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::BashAssoc a+=()[L743]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-assoc.test.sh::BashAssoc ${a[@]@Q}[L759]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-compat.test.sh::User arrays decay[L23]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-compat.test.sh::++ on a whole array increments the first element (disallowed with strict_array)[L84]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-compat.test.sh::value.BashArray internal representation - Indexed[L106]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-compat.test.sh::value.BashArray internal representation - Assoc (ordering is a problem)[L158]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::Tilde expansions in RHS of [k]=v (BashArray)[L3]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::[k]=$v and [k]="$@" (BashArray)[L44]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::[k]=$v and [k]="$@" (BashAssoc)[L77]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::append to element (BashArray)[L111]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::append to element (BashAssoc)[L125]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::non-index forms of element (BashAssoc)[L148]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::Evaluation order (1)[L166]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::Evaluation order (2)[L178]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::Evaluation order (3)[L189]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::[k1]=v1 (BashArray)[L208]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::[k1]=v1 (BashAssoc)[L221]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::[k1]=v1 looking like brace expansions (BashAssoc)[L231]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::[k1]=v1 looking like brace expansions (BashArray)[L239]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-literal.test.sh::BashArray cannot be changed to BashAssoc and vice versa[L249]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::test "declare -p sp"[L85]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::Negative index with a[i]=v[L188]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::a[i]=v with BigInt[L209]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::Negative out-of-bound index with a[i]=v (1/2)[L236]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::Negative out-of-bound index with a[i]=v (2/2)[L265]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::unset -v a[i] with out-of-bound negative index[L375]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::unset -v a[i] for max index[L405]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::[[ -v a[i] ]][L430]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::[[ -v a[i] ]] with invalid negative index[L479]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::((sp[i])) and ((sp[i]++)) with invalid negative index[L568]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${sp[i]}[L595]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${sp[i]} with negative invalid index[L634]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${a[@]:offset:length}[L675]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${@:offset:length}[L747]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${a[@]}[L848]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${a[@]#...}[L861]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${a[@]/pat/rep}[L885]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${a[@]@P}, ${a[@]@Q}, and ${a[@]@a}[L917]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${a-}[L974]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::${!a[@]}[L1006]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::"${a[*]}"[L1022]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::compgen -F _set_COMPREPLY[L1039]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::Regression: a[-1]=1[L1202]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array-sparse.test.sh::Initializing indexed array with ([index]=value)[L1222]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::local array[L19]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Command with with word splitting in array[L31]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::space before ( in array initialization[L36]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::array with invalid token[L53]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Negative index and sparse array[L84]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Negative index and sparse array[L121]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Length after unset[L142]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Retrieve index that is a command sub[L169]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Interpolate array into array[L229]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Exporting array doesn't do anything, not even first element[L235]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Arrays can't be used as env bindings[L275]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Associative arrays can't be used as env bindings either[L287]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Set element with var ref[L303]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Set element with array ref[L310]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Set array item to array[L319]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Slice of array with [@][L329]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Negative slice begin[L337]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Negative slice length[L347]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Slice with arithmetic[L353]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::glob within array yields separate elements[L381]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Stripping a whole array quoted[L419]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Multiple subscripts not allowed[L427]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Length op, index op, then transform op is not allowed[L436]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::${mystr[@]} and ${mystr[*]} are no-ops[L443]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Create a "user" array out of the argv array[L473]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Brace Expansion within Array[L486]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::array default[L491]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Singleton Array Copy and Assign.  OSH can't index strings with ints[L496]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::declare -a / local -a is empty array[L521]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Append sparse arrays[L568]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Slice of sparse array with [@][L582]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Using an array itself as the index on LHS[L592]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Using an array itself as the index on RHS[L603]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::a[$x$y] on LHS and RHS[L613]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Dynamic parsing of LHS a[$code]=value[L630]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::test -v a[i] with arith expressions[L722]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::More arith expressions in [[ -v array[expr]] ]][L759]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Regression: Assigning with out-of-range negative index[L824]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Regression: Negative index in [[ -v a[index] ]][L847]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Regression: Negative out-of-range index in [[ -v a[index] ]][L869]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Regression: unset a[-2]: out-of-bound negative index should cause error[L908]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Regression: Out-of-bound negative offset for ${a[@]:offset}[L932]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Regression: Array length after unset[L956]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Regression: ${a[@]@Q} crash with `a[0]=x a[2]=y`[L977]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[array.test.sh::Regression: silent out-of-bound negative index in ${a[-2]} and $((a[-2]))[L996]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-deferred.test.sh::typeset -a a[1]=a a[3]=c[L16]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-deferred.test.sh::local a[3]=4[L23]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-deferred.test.sh::is 'builtin' prefix and array allowed?  OSH is smarter[L95]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-deferred.test.sh::is 'command' prefix and array allowed?  OSH is smarter[L106]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-dialects.test.sh::test -v with arrays[L39]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-dialects.test.sh::test -v with assoc arrays[L89]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare[L97]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare -p[L157]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare -p var[L264]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare -p arr[L316]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare -pnrx[L374]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare -paA[L413]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::ble.sh: eval -- "$(declare -p var arr)"[L529]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare -p and value.Undef[L555]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::eval -- "$(declare -p arr)" (restore arrays w/ unset elements)[L575]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare -p UNDEF (and typeset) -- prints something to stderr[L591]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::typeset -r makes a string readonly[L688]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::typeset -ar makes it readonly[L720]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::Multiple assignments / array assignments on a line[L760]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::syntax error in array assignment[L782]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::declare -g (bash-specific; bash-completion uses it)[L794]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::dynamic flag in array in assign builtin[L854]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::typeset +x[L878]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::typeset +r removes read-only attribute (TODO: documented in bash to do nothing)[L888]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::function name with /[L927]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign-extended.test.sh::unset and shell funcs[L945]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::Escaped = in command name[L113]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::Env binding not allowed before compound command[L119]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::Trying to run keyword 'for'[L127]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::Env binding in readonly/declare is NOT exported!  (pitfall)[L148]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::'local x' does not set variable[L205]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::'local -a x' does not set variable[L216]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::'declare -A' and then dict assignment[L240]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::Reveal existence of "temp frame" (All shells disagree here!!!)[L281]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::Test above without 'local' (which is not POSIX)[L338]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::Using ${x-default} after unsetting a temp binding shadowing a global[L397]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::static assignment doesn't split[L427]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::aliased assignment doesn't split[L442]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::assign and glob[L505]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::declare and glob[L525]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::readonly $x where x='b c'[L538]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::readonly a=(1 2) no_value c=(3 4) makes 'no_value' readonly[L563]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::redirect after assignment builtin (eval redirects after evaluating arguments)[L596]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::redirect after command sub (like case above but without assignment builtin)[L610]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::redirect after bare assignment[L619]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[assign.test.sh::readonly array should not be modified by a+=(1)[L751]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-features.test.sh::[bash_unset] local-unset / dynamic-unset for localvar[L3]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-features.test.sh::[bash_unset] local-unset / dynamic-unset for localvar (mutated from tempenv)[L47]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-features.test.sh::[bash_unset] local-unset / dynamic-unset for tempenv[L95]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-features.test.sh::[bash_unset] function call with tempenv vs tempenv-eval[L128]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-features.test.sh::[bash_unset] localvar-inherit from tempenv[L241]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-features.test.sh::[compat_array] scalar write to arrays[L336]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-features.test.sh::[compat_array] scalar write to associative arrays[L358]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-unset.test.sh::[bash_unset] nested context by tempenv-eval[L7]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-unset.test.sh::[bash_unset] local-unset / dynamic-unset for localvar on nested-context[L37]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-unset.test.sh::[bash_unset] dynamic-unset for nested localvars[L73]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-unset.test.sh::[bash_unset] dynamic-unset for nested tempenvs[L135]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[ble-unset.test.sh::[bash_unset] local-unset for nested tempenvs[L191]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bool-parse.test.sh::test builtin: ( = ) is confusing: equality test or non-empty string test[L44]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bool-parse.test.sh::test builtin: ( == ) is confusing: equality test or non-empty string test[L64]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bool-parse.test.sh::Not allowed: [[ ) ]] and [[ ( ]][L110]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::partial leading expansion[L13]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::double expansion with single and double quotes[L44]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::expansion with mixed quotes[L48]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::double expansion with simple var -- bash bug[L57]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::double expansion with braced variable[L64]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::double expansion with literal and simple var[L70]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::Empty alternative with empty string suffix[L115]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::expansion on RHS of assignment[L134]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::no expansion with RHS assignment[L141]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::Tilde expansion with brace expansion[L157]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::Two kinds of tilde expansion[L199]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::Mixed case char expansion is invalid[L370]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::comma and invalid range (adjacent and nested)[L444]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::OSH provides an alternative to invalid syntax[L461]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::Invalid brace expansions don't expand[L479]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[brace-expansion.test.sh::Invalid brace expansions mixing characters and numbers[L489]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::assign readonly -- one line[L25]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::assign readonly -- multiple lines -- set -o posix[L45]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::First word like foo$x() and foo$[1+2] (regression)[L74]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::Function names[L88]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::file with NUL byte[L105]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::'echo' and printf fail on writing to full disk[L143]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::other builtins fail on writing to full disk[L158]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::for loop (issue #1446)[L191]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::for loop 2 (issue #1446)[L205]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::autoconf word split (#1449)[L218]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::autoconf arithmetic - relaxed eval_unsafe_arith (#1450)[L264]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::command execution $(echo 42 | tee PWNED) not allowed[L277]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::process sub <(echo 42 | tee PWNED) not allowed[L300]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::unset doesn't allow command execution[L324]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::(( status bug[L376]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::autotools as_fn_arith bug in configure[L395]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[bugs.test.sh::Crash in {1..10} - issue #2296[L433]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bash.test.sh::bad help topic[L18]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bash.test.sh::mapfile[L28]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bash.test.sh::readarray (synonym for mapfile)[L47]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bash.test.sh::mapfile (array name): arr[L66]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bash.test.sh::mapfile (delimiter): -d delim[L85]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bash.test.sh::mapfile (delimiter): -d '' (null-separated)[L102]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bash.test.sh::mapfile -t doesn't remove \\r[L135]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bash.test.sh::mapfile (store position): -O start[L170]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::four args[L82]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::( ) ! -a -o with system version of [[L144]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::-x[L244]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::-r[L257]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::-w[L268]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::-k for sticky bit[L280]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::test -p named pipe[L407]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::-u for setuid, -g too[L442]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::-ef[L534]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::test -c[L573]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-bracket.test.sh::Looks like octal, but digit is too big[L710]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::cd BAD/..[L10]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::cd with 2 or more args - with strict_arg_parse[L26]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::cd - without OLDPWD[L64]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::pwd in symlinked dir on shell initialization[L216]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::Test the current directory after 'cd ..' involving symlinks[L237]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::cd permits double bare dash[L280]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::cd to symlink with -L and -P[L285]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::cd to relative path with -L and -P[L306]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::CDPATH is respected[L344]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::Change directory in non-shell parent process (make or Python)[L367]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-cd.test.sh::What happens when inherited $PWD and current dir disagree?[L409]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::complete with no args and complete -p both print completion spec[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::complete -F f is usage error[L23]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::complete with nonexistent function[L39]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::complete with no action[L45]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::-A function prints functions[L51]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::Invalid syntax[L72]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::how compgen calls completion functions[L77]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::complete -o -F (git)[L100]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compopt with invalid syntax[L106]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -f[L121]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -v with local vars[L137]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -v P[L154]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -e with global/local exported vars[L162]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -e P[L186]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen with actions: function / variable / file[L194]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen with actions: alias, setopt[L207]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen with actions: shopt[L220]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen with action and suffix: helptopic[L226]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -A directory[L232]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -A file[L243]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -A user[L255]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -A command completes external commands[L260]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -A command completes functions and aliases[L269]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -A command completes builtins and keywords[L296]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -k shows the same keywords as bash[L308]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -k completes reserved shell keywords[L375]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::-o filenames and -o nospace have no effect with compgen[L389]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::-o plusdirs and -o dirnames with compgen[L397]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -o default completes files and dirs[L418]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen doesn't respect -X for user-defined functions[L437]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -W words -X filter[L454]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -f -X filter -- $cur[L463]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen doesn't need shell quoting[L478]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -W 'one two three'[L490]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -W evaluates code in $()[L513]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -W uses IFS, and delimiters are escaped with \\[L522]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::Parse errors for compgen -W and complete -W[L531]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -A builtin[L570]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::complete -C vs. compgen -C[L576]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-completion.test.sh::compgen -F with scalar COMPREPLY[L636]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::pushd/popd[L5]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::pushd usage[L26]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::popd usage error[L44]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::popd returns error on empty directory stack[L65]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::cd replaces the lowest entry on the directory stack![L74]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs builtin[L106]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs -c to clear the stack[L114]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs -v to print numbered stack, one entry per line[L131]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs -p to print one entry per line[L154]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs -l to print in long format, no tilde prefix[L173]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs to print using tilde-prefix format[L188]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs test converting true home directory to tilde[L197]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs tilde test when $HOME is exactly $PWD[L214]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs test of path alias `..`[L230]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-dirs.test.sh::dirs test of path alias `.`[L237]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::echo -e with 4 digit unicode escape[L175]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::echo -e with 8 digit unicode escape[L187]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::\\0377 is the highest octal byte[L199]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::\\0400 is one more than the highest octal byte[L208]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::\\0777 is out of range[L221]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::incomplete hex escape[L236]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::\\x[L245]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::incomplete octal escape[L258]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::incomplete unicode escape[L267]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::\\u6[L279]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::\\0 \\1 \\8[L291]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-echo.test.sh::echo to redirected directory is an error[L306]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::eval accepts/ignores --[L9]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::eval usage[L17]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::eval string with 'break continue return error'[L39]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::exit within eval (regression)[L135]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::source accepts/ignores --[L156]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::Source with syntax error[L222]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::source looks in PATH for files[L323]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::source finds files in PATH before current dir[L333]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-eval-source.test.sh::sourcing along PATH should ignore directories[L363]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::getopts sees unknown arg[L10]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::getopts with invalid variable name[L74]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::getopts with invalid flag[L82]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::getopts missing required argument[L111]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::getopts doesn't look for flags after args[L124]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::OPTIND[L155]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::OPTIND after multiple getopts with same spec[L159]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::OPTIND after multiple getopts with different spec[L192]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::two flags: -ab[L304]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::flag and arg: -c10[L318]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::More Smooshing 1[L332]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::More Smooshing 2[L350]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::OPTIND should be >= 1 (regression)[L368]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::getopts bug #1523[L384]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::More regression for #1523[L409]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-getopts.test.sh::getopts normal mode - invalid option (compare with silent)[L444]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta-assign.test.sh::builtin declare a=(x y) is allowed[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta-assign.test.sh::export, builtin export[L89]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta-assign.test.sh::\\command readonly - similar issue[L160]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta.test.sh::command -v[L3]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta.test.sh::command -v executable, builtin[L40]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta.test.sh::command -v with multiple names[L60]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta.test.sh::command -v doesn't find non-executable file[L89]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta.test.sh::command -v doesn't find executable dir[L116]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta.test.sh::command -V[L146]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta.test.sh::command -p (override existing program)[L296]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-meta.test.sh::builtin usage[L361]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-misc.test.sh::history builtin usage[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-misc.test.sh::Print shell strings with weird chars: set and printf %q and ${x@Q}[L47]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-misc.test.sh::Print shell strings with normal chars: set and printf %q and ${x@Q}[L88]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf -v a[1][L60]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf -v syntax error[L76]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf -v dynamic scope[L115]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf with too few arguments[L156]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf with too many arguments[L162]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %6.4d -- "precision" does padding for integers[L198]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %6.4x X o[L216]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %6.s and %0.s[L312]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::Invalid UTF-8[L501]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::Too large[L559]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::negative numbers with unsigned / octal / hex[L601]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf backslash escapes[L698]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf octal backslash escapes[L716]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf unicode backslash escapes[L724]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf invalid backslash escape (is ignored)[L736]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %c unicode - prints the first BYTE of a string - it does not respect UTF-8[L760]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf invalid format[L781]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %q[L796]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %6q (width)[L809]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf negative numbers[L826]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf # flag[L914]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::Runtime error for invalid integer[L938]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::%(strftime format)T[L970]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::%(strftime format)T doesn't respect TZ if not exported[L989]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::%(strftime format)T TZ in environ but not in shell's memory[L1012]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::%10.5(strftime format)T[L1033]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::bash truncates long strftime string at 128[L1059]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf positive integer overflow[L1117]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf negative integer overflow[L1199]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %b does backslash escaping[L1278]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %b respects \\c early return[L1329]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::printf %b with truncated octal escapes[L1379]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::leading spaces are accepted in value given to %d %X, but not trailing spaces[L1509]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-printf.test.sh::Arbitrary base 64#a is rejected (unlike in shell arithmetic)[L1584]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read builtin with no newline returns status 1[L52]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -n doesn't strip whitespace (bug fix)[L100]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -d -n - respects delimiter and splits[L154]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read without args uses $REPLY, no splitting occurs (without -n)[L231]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read with line continuation reads multiple physical lines[L365]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read multiple vars spanning many lines[L375]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read with IFS=$'\\n'[L415]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read multiple lines with IFS=:[L426]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read with IFS=''[L441]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -a reads into array[L477]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -d : (colon-separated records)[L509]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -d '' (null-separated records)[L530]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -rd[L551]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -d when there's no delimiter[L565]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -t 0 tests if input is available[L582]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -u[L634]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -u -s[L657]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -u 3 -d 5[L670]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -u 3 -d b -N 6[L683]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -N doesn't respect delimiter, while read -n does[L705]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read with smooshed args[L748]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -r -d '' for NUL strings, e.g. find -print0[L757]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::mapfile from directory (bash doesn't handle errors)[L808]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -n and backslash escape[L839]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -n 4 with incomplete backslash[L861]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read -n 4 with backslash + delim[L887]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::"backslash + newline" should be swallowed regardless of "-d <delim>"[L903]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::IFS='x ' read -a: trailing spaces (unlimited split)[L940]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::IFS='x ' read a b: trailing spaces (with max_split)[L964]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::IFS='x ' read -a: intermediate spaces (unlimited split)[L981]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::IFS='x ' incomplete backslash[L1008]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::IFS='\\ ' and backslash escaping[L1019]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::max_split and backslash escaping[L1034]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::IFS=x read a b <<< xxxxxx[L1044]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read and "\\ "[L1102]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-read.test.sh::read bash bug[L1166]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set -u with undefined variable exits the interpreter[L44]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set -u with undefined var in interactive shell does NOT exit the interpreter[L66]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set -u error can break out of nested evals[L101]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set -o lists options[L152]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::'set' and 'eval' round trip[L159]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set - - and so forth[L202]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set - leading single dash is ignored, turns off xtrace verbose (#2364)[L241]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set - stops option processing like set --[L285]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::A single + is an ignored flag; not an argument[L309]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set - + and + -[L344]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set +a stops exporting[L376]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::set -o allexport (long form)[L387]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-set.test.sh::variables set before set -a are not exported[L398]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-special.test.sh::Prefix assignments persist after special builtins, like : (set -o posix)[L30]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-special.test.sh::Prefix assignments persist after readonly, but NOT exported (set -o posix)[L52]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-special.test.sh::Prefix binding for exec is a special case (versus e.g. readonly)[L79]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-special.test.sh::exec without args is a special case of the special case in some shells[L91]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-special.test.sh::Special builtins can't be redefined as shell functions (set -o posix)[L129]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-special.test.sh::bash 'type' gets confused - says 'function', but runs builtin[L228]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-special.test.sh::command, builtin - both can be redefined, not special (regression)[L285]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -t -> keyword[L27]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -t doesn't find non-executable (like command -v)[L67]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -p and -P builtin -> file[L90]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -a -P gives multiple files[L108]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -p builtin -> not found[L121]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -P builtin -> not found[L132]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -P builtin -> not a file[L138]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -P builtin -> not a file but file found[L144]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -f builtin -> function and file exists[L164]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type prints function source code[L179]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -a -> file[L240]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -ap -> file; abbreviated[L252]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -a -> builtin and file[L263]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -a -> builtin and file and shell function[L275]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -ap -> builtin and file; doesn't print builtin or function[L316]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type-bash.test.sh::type -P does not find directories (regression)[L341]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type.test.sh::type -> alias external[L17]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type.test.sh::type of relative path[L51]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-type.test.sh::type -> not found[L77]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Export sets a global variable that persists after export -n[L19]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Export a local that shadows a global[L116]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Unset exported variable, then define it again.  It's NOT still exported.[L139]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Exporting a parent func variable (dynamic scope)[L155]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::can't export array (strict_array)[L193]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::can't export associative array (strict_array)[L213]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::assign to readonly variable[L230]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Make an existing local variable readonly[L241]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::assign to readonly variable - errexit[L273]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Unset a function without -f[L314]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Unset and scope (bug #653)[L342]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Unset invalid variable name[L408]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Unset -f[L444]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Unset wrong type[L495]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::unset -v assoc (related to issue #661)[L548]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Unset array member with dynamic parsing[L587]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Use local twice[L602]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::Local without variable is still unset![L615]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::local after readonly[L629]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[builtin-vars.test.sh::unset a[-1] (bf.bash regression)[L654]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[case_.test.sh::Quoted literal in glob pattern[L98]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[case_.test.sh::\\(\\) in pattern (regression)[L211]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[case_.test.sh::case \\n bug regression[L234]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-parsing.test.sh::Redirect on control flow (ignored in OSH)[L41]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-parsing.test.sh::Redirect on control flow with ysh:all (no_parse_ignored)[L55]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub-ksh.test.sh::${ echo hi;}[L6]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub-ksh.test.sh::${ echo hi }  without semi-colon[L43]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub-ksh.test.sh::${|REPLY=hi}[L63]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub-ksh.test.sh::for loop / case[L89]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::case in subshell[L7]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Nested backticks[L28]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Making keyword out of command sub should NOT work[L40]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Command Sub trailing newline removed[L66]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Command Sub trailing whitespace not removed[L72]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Command Sub and exit code[L78]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Double Quotes in Command Sub in Double Quotes[L105]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Quoting $ within ``[L145]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Quoting $ within `` within double quotes[L159]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Quoting non-special characters within ``[L214]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Quoting non-special characters within `` within double quotes[L226]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::More levels of double quotes in backticks[L248]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command-sub.test.sh::Syntax errors with double quotes within backticks[L268]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::Command block[L5]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::Permission denied[L11]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::Not a dir[L16]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::Name too long[L20]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::File with no shebang is executed[L32]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::File with relative path and no shebang is executed[L40]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::File in relative subdirectory and no shebang is executed[L48]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::$PATH lookup[L57]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::filling $PATH cache, then insert the same command earlier in cache[L70]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::filling $PATH cache, then deleting command[L101]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::Non-executable on $PATH[L134]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::hash without args prints the cache[L154]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::hash with args[L172]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::hash -r doesn't allow additional args[L191]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[command_.test.sh::PATH resolution skips directories and non-executables[L198]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ compare with literal -f (compare with test-builtin.test.sh)[L166]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ with op variable (compare with test-builtin.test.sh)[L175]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ at runtime doesn't work[L188]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ with env prefix doesn't work[L193]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::Argument that looks like a real operator[L207]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::-eq does dynamic arithmetic parsing (not supported in OSH)[L267]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ '(' foo ]] is syntax error[L286]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ -z ]] is syntax error[L297]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ -z '>' a ]] is syntax error[L307]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ ]] is syntax error[L318]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ && ]] is syntax error[L325]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ a 3< b ]] doesn't work (bug regression)[L332]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::tilde expansion with =~ (confusing)[L390]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::[[ ]] with redirect[L419]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dbracket.test.sh::\\(\\) in pattern (regression)[L441]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dparen.test.sh::bash: K in (( A[K] = V )) is a constant string[L68]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dparen.test.sh::bash: V in (( A["K"] = V )) gets coerced to integer[L108]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dparen.test.sh::literal strings inside (( ))[L130]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dparen.test.sh::(( )) with redirect[L148]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dparen.test.sh::Assigning whole raray (( b = a ))[L159]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[dparen.test.sh::Example of incrementing associative array entry with var key (ble.sh)[L192]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[empty-bodies.test.sh::Empty do/done[L3]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[empty-bodies.test.sh::Empty then/fi[L17]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::errexit for nonexistent command[L13]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::errexit with { }[L27]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::More && ||[L73]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::errexit and brace group { }[L105]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::errexit with subshell[L164]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::setting errexit in a subshell works but doesn't affect parent shell[L227]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::set errexit while it's ignored in a subshell (moot with strict_errexit)[L240]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::background processes respect errexit[L282]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::pipeline process respects errexit[L291]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::simple command / assign - redir failure DOES respect errexit[L308]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::bash atoms [[ (( - redir failure checked[L369]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::brace group - redir failure checked[L402]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[errexit.test.sh::while loop - redirect failure checked[L433]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[exit-status.test.sh::Truncating 'exit' status[L10]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[exit-status.test.sh::subshell OverflowError https://github.com/oilshell/oil/issues/996[L83]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[exit-status.test.sh::If subshell true[L263]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[exit-status.test.sh::Exit code when command sub evaluates to empty str, e.g. `false` (#2416)[L283]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[exit-status.test.sh::More test cases with empty argv[L309]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-files.test.sh::Two adjacent alternations[L76]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-files.test.sh::Glob other punctuation chars (lexer mode)[L133]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-files.test.sh::Escaping of pipe (glibc bug, see demo/glibc_fnmatch.c)[L159]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-files.test.sh::Extended glob in same word as array[L224]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-files.test.sh::Extended glob with word splitting[L255]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-files.test.sh::In Array Literal and for loop[L273]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-files.test.sh::no match[L305]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-match.test.sh::case with extglob[L219]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-match.test.sh::[[ $x == !($str) ]][L254]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-match.test.sh::Turning extglob on changes the meaning of [[ !(str) ]] in bash[L265]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[extglob-match.test.sh::With extglob on, !($str) on the left or right of == has different meanings[L285]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[fatal-errors.test.sh::Unrecoverable: divide by zero in redirect word[L8]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[fatal-errors.test.sh::Unrecoverable: divide by zero in case[L83]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[fatal-errors.test.sh::Unrecoverable: ${undef?message}[L132]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[fatal-errors.test.sh::${undef} with nounset[L161]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[for-expr.test.sh::Accepts { } syntax too[L43]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[for-expr.test.sh::Arith lexer mode[L96]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[for-expr.test.sh::Condition that's greater than 32 bits[L156]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[func-parsing.test.sh::Hard case, function with } token in it[L43]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[func-parsing.test.sh::Function name with $[L66]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[func-parsing.test.sh::Function name with command sub[L71]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob-bash.test.sh::shopt -s failglob in loop context[L31]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob-bash.test.sh::shopt -s failglob in array literal context[L49]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob-bash.test.sh::shopt -s failglob exits properly in loop context with set -e[L81]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob-bash.test.sh::shopt -s failglob behavior on single line with semicolon[L102]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::glob can expand to command and arg[L37]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::glob after var expansion[L43]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::store literal globs in array then expand[L76]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::glob inside array[L84]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::glob with escaped - in char class[L92]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::glob with char class expression[L98]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::glob escaped[L111]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::: escaped[L118]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::set -o noglob[L153]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::Splitting/Globbing doesn't happen on local assignment[L172]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::Glob of unescaped [[] and []][L186]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::Glob of negated unescaped [[] and []][L196]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::Glob ordering respects LC_COLLATE (zsh respects this too)[L262]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::\\ in unquoted substitutions does not match a backslash[L296]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::\\ in unquoted substitutions escapes globchars[L365]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[glob.test.sh::shopt -u globskipdots shows . and ..[L402]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Don't glob flags on file system with GLOBIGNORE[L5]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore *.txt[L16]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore ?.txt[L26]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore *.o:*.h[L36]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore single file src/__main__.py[L44]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore dirs dist/*:node_modules/*[L53]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::find files in subdirectory but not the ignored pattern[L65]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore globs with char patterns like [!ab][L75]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore globs with char classes like [[:alnum:]][L91]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore *[L109]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::treat escaped patterns literally[L118]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::resetting globignore reverts to default behaviour[L126]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Ignore .:..[L137]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Quoting GLOBIGNORE[L151]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::When GLOBIGNORE is set, glob may become empty (nullglob too)[L183]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[globignore.test.sh::Extended glob expansion combined with GLOBIGNORE[L219]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Here redirect with explicit descriptor[L22]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Here doc from another input file descriptor[L29]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Multiple here docs with different descriptors[L37]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Two here docs -- first is ignored; second ones wins![L144]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Here doc with line continuation, then pipe.  Syntax error.[L152]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Compound command here doc[L195]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Two compound commands with two here docs[L277]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Function def and execution with here doc[L293]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Multiple here docs in pipeline[L374]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Multiple here docs in pipeline on multiple lines[L392]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Here doc and backslash double quote[L412]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[here-doc.test.sh::Here doc escapes[L421]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[introspect.test.sh::${FUNCNAME[@]} array[L28]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[introspect.test.sh::FUNCNAME with source (scalar or array)[L44]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[introspect.test.sh::$((BASH_LINENO)) (scalar form in arith)[L150]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[introspect.test.sh::${BASH_SOURCE[@]} with source and function name[L157]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[introspect.test.sh::${BASH_LINENO[@]} is a stack of line numbers for function calls[L172]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[introspect.test.sh::Locations with temp frame[L191]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[introspect.test.sh::Locations when sourcing[L206]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::implicit for loop[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::the word 'in' can be the loop variable[L39]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::while in pipe with subshell[L145]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::continue in subshell[L179]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::continue in subshell aborts with errexit[L214]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::too many args to continue[L273]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::break in condition of nested loop[L310]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::return within eval[L325]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::break/continue within eval[L336]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::break/continue within source[L366]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::top-level break/continue/return (without strict_control_flow)[L406]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::$b break, $c continue, $r return, $e exit[L483]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::\\break \\continue \\return \\exit[L536]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[loop.test.sh::builtin,command break,continue,return,exit[L582]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::pass array by reference[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::mutate array by reference[L14]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::pass assoc array by reference[L27]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::pass local array by reference, relying on DYNAMIC SCOPING[L39]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::flag -n and +n[L56]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::mutating through nameref: ref=[L80]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::flag -n combined ${!ref} -- bash INVERTS[L107]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::named ref with 1 $1 etc.[L179]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::assign to empty nameref and invalid nameref[L275]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::-n attribute before it has a value[L299]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::exported nameref[L337]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::readonly var can't be assigned through nameref[L384]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::Mutually recursive namerefs detected on WRITE[L462]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::Dynamic scope with namerefs[L479]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::a[expr] in nameref[L546]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::a[@] in nameref[L556]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::bad mutation through nameref: ref[0]= where ref is array[0][L589]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::@ in nameref isn't supported, unlike in ${!ref}[L604]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nameref.test.sh::Unquoted assoc reference on RHS[L621]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nix-idioms.test.sh::var ref to array 'preHooks[@]'[L3]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nix-idioms.test.sh::Similar to above with set -u[L56]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nix-idioms.test.sh::${!ref} to undefined string var is fatal, INCONSISTENT with array[L103]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nix-idioms.test.sh::export with dynamic var name +=[L119]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nix-idioms.test.sh::let idiom can be written in POSIX shell - eval ": \\$(( ))"[L156]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nocasematch-match.test.sh::[[ equality matching[L6]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nocasematch-match.test.sh::[[ regex matching[L19]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nocasematch-match.test.sh::case matching[L41]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::NUL bytes with echo -e[L5]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::printf - literal NUL in format string[L27]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::printf - \\0 escape shows NUL byte[L63]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::printf - NUL byte in value (OSH and zsh agree)[L72]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::NUL bytes with echo $'\\0' (OSH and zsh agree)[L96]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::NUL bytes and IFS splitting[L119]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::NUL bytes with test -f[L174]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::Compare \\x00 byte versus \\x01 byte - command sub[L237]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::Compare \\x00 byte versus \\x01 byte - read builtin[L276]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::Compare \\x00 byte versus \\x01 byte - read -n[L310]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::Compare \\x00 byte versus \\x01 byte - mapfile builtin[L354]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::Strip ops # ## % %% with NUL bytes[L381]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::Issue 2269 Reduction[L434]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[nul-bytes.test.sh::Issue 2269 - Do NUL bytes match ? in ${a#?}[L484]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[paren-ambiguity.test.sh::(( closed with ) ) after multiple lines is command - #2337[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[paren-ambiguity.test.sh::$(( closed with ) ) after multiple lines is command - #2337[L18]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[paren-ambiguity.test.sh::$(( closed with )) after multiple lines is parse error - #2337[L56]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[paren-ambiguity.test.sh::$((which example - command sub versus arith sub - gnunet-gtk package[L130]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::Long Token - 65535 bytes[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::Token that's too long for Oils - 65536 bytes[L15]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::Bad braced var sub -- not allowed[L32]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::Incomplete while[L46]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::Incomplete for[L53]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::Incomplete if[L60]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::do unexpected[L67]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::} is a parse error[L73]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::{ is its own word, needs a space[L80]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::} on the second line[L89]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::bad var name globally isn't parsed like an assignment[L106]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::misplaced parentheses are not a subshell[L126]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::incomplete command sub[L131]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::incomplete backticks[L136]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::misplaced ;;[L141]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::empty clause in [[[L147]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::interactive parse error (regression)[L156]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::array literal inside array is a parse error[L169]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::array literal inside loop is a parse error[L178]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::array literal in case[L190]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[parse-errors.test.sh::%foo=() is parse error (regression)[L203]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::While Loop ends pipeline[L27]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::Initial value of PIPESTATUS is empty string[L53]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::|&[L115]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::! turns non-zero into zero[L127]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::! turns zero into 1[L132]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::! is not a command[L175]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::Evaluation of argv[0] in pipeline occurs in child[L180]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::bash/dash/mksh run the last command is run in its own process[L192]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::shopt -s lastpipe (always on in OSH)[L205]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::SIGPIPE causes pipeline to die (regression for issue #295)[L216]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::Pipeline in eval[L240]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[pipeline.test.sh::shopt -s lastpipe and shopt -s no_last_fork interaction[L247]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[posix.test.sh::Empty for loop without in.  Do can be on the same line I guess.[L16]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[posix.test.sh::Empty action for case is syntax error[L57]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[posix.test.sh::Bare semi-colon not allowed[L86]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[posix.test.sh::Command substitution in default[L100]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[quote.test.sh::Backslash escapes inside double quoted string[L75]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[quote.test.sh::Unterminated double quote[L124]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[quote.test.sh::$'' octal escapes don't have leading 0[L182]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[quote.test.sh::$'' octal escapes with fewer than 3 chars[L192]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[quote.test.sh::$'' supports \\cA escape for Ctrl-A - mask with 0x1f[L240]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[quote.test.sh::\\c' is an escape, unlike bash[L282]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redir-order.test.sh::subshell + redirect order[L14]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redir-order.test.sh::for word + redirect order[L24]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redir-order.test.sh::case word + redirect order[L36]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redir-order.test.sh::[[ + redirect order[L53]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::>$file touches a file[L8]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::$(< $file) yields the contents of the file[L29]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::`< $file` behaves like $(< file)[L43]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::Redirect in command sub[L138]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::Redirect in function body is evaluated multiple times[L184]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::Redirect in function body AND function call[L204]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::redirect bash extensions:   [[  ((  for (([L213]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::Prefix redirect for loop -- not allowed[L291]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-command.test.sh::Nested function stdout redirect[L322]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::File redirects with glob args (bash and zsh only)[L3]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::File redirect without matching any file, with failglob[L58]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::Redirect to $empty (in function body)[L82]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::Redirect to ''[L94]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::File redirect to $var with glob char[L103]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::File redirect that globs to more than one file (bash and zsh only)[L132]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::File redirect with extended glob[L165]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::Extended glob that doesn't match anything[L193]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::Non-file redirects don't respect glob args (we differe from bash)[L230]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::Redirect with brace expansion isn't allowed[L258]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect-multi.test.sh::File redirects have word splitting too![L289]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::>& and <& are the same[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::<&[L16]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::2&>1 (is it a redirect or is it like a&>1)[L38]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::Named file descriptor[L98]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::: 9> fdleak (OSH regression)[L115]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::3>&- << EOF (OSH regression: fail to restore fds)[L141]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::Redirect to empty string[L181]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::Redirect to file descriptor that's not open[L193]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::>| to clobber[L239]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::1>&2- to move file descriptor[L309]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::1>&2- (Bash bug: fail to restore closed fd)[L326]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::<> for read/write[L360]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::<> for read/write named pipes[L375]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::&>> appends stdout and stderr[L388]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::echo foo >&100 (OSH regression: does not fail with invalid fd 100)[L456]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::echo foo >&N where N is first unused fd[L464]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::exec {fd}>&- (OSH regression: fails to close fd)[L490]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::noclobber can still write to non-regular files like /dev/null[L503]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[redirect.test.sh::Parsing of x={myvar} and related cases[L542]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::BASH_REMATCH[L39]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Regex quoted with single quotes[L74]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Regex quoted with double quotes[L82]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Double quoting pat variable -- again bash doesn't like it.[L100]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Regex with == and not =~ is parse error, different lexer mode required[L116]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Regex to match literal brackets [][L140]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Regex to match literals . ^ $ etc.[L156]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Unquoted { is a regex parse error[L186]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Fatal error inside [[ =~ ]][L200]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Quoted { and +[L211]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Escaped {[L272]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::pattern a=(1)[L328]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Bug: Nix idiom with closing ) next to pattern[L354]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::unquoted (a  b) as pattern, (a  b|c)[L370]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Multiple adjacent () groups[L396]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::unquoted [a  b] as pattern, [a  b|c][L431]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Operator chars ; & but not |[L475]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Quotes '' "" $'' $"" in pattern[L547]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Unicode in pattern[L581]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Parse error with 2 words[L593]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::make a lisp example[L608]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[regex.test.sh::Operators and space lose meaning inside ()[L627]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::printf %q newline[L12]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::printf %q spaces[L36]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::printf %q quotes[L52]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::printf %q unprintable[L76]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::printf %q unicode[L94]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::printf %q invalid unicode[L111]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::set[L159]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::declare[L174]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[serialize.test.sh::${var@Q}[L193]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-func.test.sh::Return statement[L20]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-func.test.sh::return "" (a lot of disagreement)[L95]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-func.test.sh::return $empty[L124]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-func.test.sh::Scope of global variable when sourced in function (Shell Functions aren't Closures)[L158]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options-bash.test.sh::SHELLOPTS is updated when options are changed[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options-bash.test.sh::SHELLOPTS is readonly[L24]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options-bash.test.sh::SHELLOPTS and BASHOPTS are non-empty[L37]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options-bash.test.sh::SHELLOPTS reflects flags like sh -x[L61]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options-bash.test.sh::export SHELLOPTS does cross-process tracing[L70]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options-bash.test.sh::export SHELLOPTS does cross-process tracing with bash[L88]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options-bash.test.sh::OSH calling bash with SHELLOPTS does not change braceexpand[L108]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options-bash.test.sh::shopt -s progcomp hostcomplete are stubs (bash-completion)[L149]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::$- with -c[L7]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::$- with pipefail[L16]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::$- and more options[L27]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::$- with interactive shell[L43]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::pass short options like sh -e[L51]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::pass long options like sh -o errexit[L57]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::pass shopt options like sh -O nullglob[L63]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::set -o vi/emacs[L75]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::vi and emacs are mutually exclusive[L85]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::interactive shell starts with emacs mode on[L115]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::-n for no execution (useful with --ast-output)[L164]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::shopt -p -o prints 'set' options[L195]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::shopt -o prints 'set' options[L217]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::noclobber off[L264]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::noclobber on[L286]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::noclobber on <>[L314]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::noclobber on &> >[L343]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::noclobber on &>> >>[L373]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::set without args lists variables[L406]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::set without args and array variables[L439]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::set without args and assoc array variables (not in OSH)[L460]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::shopt -p validates option names[L573]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::shopt -p -o validates option names[L598]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::stubbed out bash options[L614]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[sh-options.test.sh::shopt -s nounset works in YSH, not in bash[L639]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[shell-bugs.test.sh::./configure idiom[L17]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[shell-grammar.test.sh::Invalid token[L35]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[shell-grammar.test.sh::If with then on same line missing semicolon[L112]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[shell-grammar.test.sh::case item without ;; is not allowed[L154]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[shell-grammar.test.sh::Case all on one line without trailing ;; or ;[L186]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[shell-grammar.test.sh::case: Using ; instead of ;;[L191]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[smoke.test.sh::here doc with var[L44]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[strict-options.test.sh::Sourcing a script that returns at the top level[L37]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[strict-options.test.sh::return at top level is an error[L76]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[strict-options.test.sh::empty argv WITHOUT strict_argv[L95]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[strict-options.test.sh::automatically creating arrays are INDEXED, not associative[L185]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[temp-binding.test.sh::FOO=bar $unset - temp binding, then empty argv from unquoted unset var (#2411)[L161]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[tilde.test.sh::~ expansion in readonly assignment[L9]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[tilde.test.sh::No tilde expansion in word that looks like assignment but isn't[L25]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[tilde.test.sh::${undef:-~}[L48]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[tilde.test.sh::x=foo:~ has tilde expansion[L80]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[tilde.test.sh::tilde expansion an assignment keyword[L123]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[tilde.test.sh::x=${undef-~:~}[L137]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[type-compat.test.sh::declare -i with +=[L41]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[type-compat.test.sh::append in arith context[L90]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[type-compat.test.sh::declare array vs. associative array[L120]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-num.test.sh::$0 with stdin[L23]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-num.test.sh::$0 with -i[L27]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::Case folding - multi code point[L60]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::Lower Case with constant string (VERY WEIRD)[L142]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::Lower Case glob[L151]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::${x@u} U L - upper / lower case (bash 5.1 feature)[L164]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::${array@Q} and ${array[@]@Q}[L190]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::${!prefix@} ${!prefix*} yields sorted array of var names[L206]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::${var@a} for attributes[L247]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::undef and @P @Q @a[L270]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::argv array and @P @Q @a[L287]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::assoc array and @P @Q @a[L304]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::${#var@X} is a parse error[L343]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::undef vs. empty string in var ops[L384]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::-o nounset with var ops[L413]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::${a[0]@a} and ${a@a}[L427]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::${!r@a} with r='a[0]' (attribute for indirect expansion of an array element)[L439]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::Array expansion with nullary var op @Q[L460]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::Array expansion with nullary var op @P[L498]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-bash.test.sh::Array expansion with nullary var op @a[L528]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-len.test.sh::Length operator can't be followed by test operator[L55]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-len.test.sh::${#s} respects LC_ALL - length in bytes or code points[L80]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Pattern replacement[L7]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Replace is longest match[L59]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Replace hard glob[L71]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Confusing unquoted slash matches bash (and ash)[L104]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Synthesized ${x///} bug (similar to above)[L127]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Replace backslash[L170]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Substitute glob characters in pattern, quoted and unquoted[L205]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::When LC_ALL=C, pattern ? doesn't match multibyte character[L245]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::\\(\\) in pattern (regression)[L302]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::patsub with single quotes and hyphen in character class (regression)[L318]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::patsub with [^]][L332]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::[a-z] Invalid range end is syntax error[L344]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Pattern is empty $foo$bar -- regression for infinite loop[L359]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-patsub.test.sh::Chromium from http://www.oilshell.org/blog/2016/11/07.html[L374]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::Cannot take length of substring slice[L14]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::String slice with math[L76]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::Slice with an index that's an array -- silent a[0] decay[L139]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::Slice with an assoc array[L155]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::Simple ${@:offset}[L170]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::${@:offset} and ${*:offset}[L191]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::${@:offset:length} and ${*:offset:length}[L246]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::${@:0:1}[L301]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::Permutations of implicit begin and length[L312]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::${array[@]:} vs ${array[@]: }  - bash and zsh inconsistent[L361]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-slice.test.sh::${array[@]::} has implicit length of zero - for ble.sh[L386]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::Remove const suffix is vectorized on $@ array[L22]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::Strip unicode prefix[L62]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::strip unquoted and quoted [[L158]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::strip unquoted and quoted [][L180]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::strip unquoted and quoted ?[L201]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::strip unquoted and quoted [a][L214]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::Nested % and # operators (bug reported by Crestwave)[L227]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::strip * (bug regression)[L253]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::strip none unicode[L329]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::Strip Right Brace (#702)[L342]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-strip.test.sh::\\(\\) in pattern (regression)[L369]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::Nix idiom ${!hooksSlice+"${!hooksSlice}"} - was workaround for obsolete bash 4.3 bug[L263]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::array and - and +[L311]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::$@ ("") and - and +[L407]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::$@ ("" "") and - and +[L430]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::$* ("" "") and - and + (IFS=)[L443]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::"$*" ("" "") and - and + (IFS=)[L463]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::array ${arr[0]=x}[L555]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::assoc array ${arr["k"]=x}[L571]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::"\\z" as arg[L587]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::"\\e" as arg[L613]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-op-test.test.sh::op-test for ${a} and ${a[0]}[L622]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::${!ref-default}[L17]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::${!undef:-}[L33]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::${!a[@]-'default'} is legal but fails with more than one element[L98]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::var ref: 1, @, *[L149]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Var ref, then assignment with ${ := }[L181]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Var ref, then error with ${ ? }[L191]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Indirect expansion, THEN suffix operators[L200]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::var ref TO array var, with subscripts[L313]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::var ref TO assoc array a[key][L332]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::var ref TO array with arbitrary subscripts[L361]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Bizarre tilde expansion in array index[L410]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Indirect expansion TO fancy expansion features bash disallows[L424]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::${!OPTIND} (used by bash completion[L478]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Var Ref Code Injection $(tee PWNED)[L512]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::${!array_ref:-set} and ${!array_ref:=assign}[L540]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Array indirect expansion with suffix operators[L570]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Array indirect expansion with replacements[L641]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-ref.test.sh::Array indirect expansion with @? conversion[L696]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::array with empty values[L27]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Inner single quotes, outer double quotes[L61]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Multiple words: no outer quotes, inner single quotes[L78]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Multiple words: no outer quotes, inner double quotes[L82]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Multiple words: outer double quotes, inner single quotes[L94]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Mixed inner quotes[L99]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::part_value tree with multiple words[L107]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Multiple words: no outer quotes, inner double quotes[L127]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Multiple words: outer double quotes, inner single quotes[L142]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::No outer quotes, Multiple internal quotes[L151]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Syntax error for single quote in double quote[L179]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::"${undef-'c d'}" and "${foo%'c d'}" are parsed differently[L186]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::single quotes work inside character classes[L269]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::comparison: :- operator with single quoted arg[L281]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Right Brace as argument (similar to #702)[L290]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Var substitution with newlines (#2492)[L315]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub-quote.test.sh::Var substitution with \\n in value[L345]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub.test.sh::Bad var sub[L8]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub.test.sh::Braced block inside ${}[L14]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub.test.sh::Filename redirect with "$@"[L24]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub.test.sh::Descriptor redirect to bad "$@"[L37]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[var-sub.test.sh::Here doc with bad "$@" delimiter[L47]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-bash.test.sh::$SHELL is set to what is in /etc/passwd[L4]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$PATH is set if unset at startup[L31]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$HOME is NOT set[L73]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::Vars set interactively only: $HISTFILE[L97]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::Some vars are set, even without startup file, or env: PATH, PWD[L112]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::UID EUID PPID can't be changed[L183]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$?[L260]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$BASHPID DOES change with subshell and command sub[L308]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::Background PID $! looks like a PID[L338]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$PPID[L346]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$UID and $EUID[L371]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$OSTYPE is non-empty[L381]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$HOSTNAME[L391]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$LINENO in "bare" redirect arg (bug regression)[L431]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$LINENO in [[[L455]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$LINENO in (([L467]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$LINENO in other for loops[L498]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$LINENO in for (( loop[L509]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$_ and ${_}[L560]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$_ with pipeline and subshell[L591]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[vars-special.test.sh::$_ with assignments, arrays, etc.[L663]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[whitespace.test.sh::Parsing shell words \\r \\v[L3]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[whitespace.test.sh::\\r in arith expression is allowed by some shells, but not most![L25]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[whitespace.test.sh::whitespace in string to integer conversion[L51]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[whitespace.test.sh::\\r at end of line is not special[L77]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[whitespace.test.sh::Default IFS does not include \\r \\v \\f[L90]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-eval.test.sh::Word joining[L35]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-eval.test.sh::Default values -- more cases[L49]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-eval.test.sh::Globbing after splitting[L53]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-eval.test.sh::Globbing escaping[L60]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::$* with empty IFS[L73]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::Leading ' ' vs leading ' _ '[L117]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS with whitespace and non-whitepace.[L135]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::empty literals are not elided[L157]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::default value can yield multiple words[L168]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::default value can yield multiple words with part joining[L172]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::default value with unquoted IFS char[L176]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS empty doesn't do splitting[L181]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS unset behaves like $' \\t\\n'[L190]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS='\\ '[L208]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS and joining arrays[L271]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS='' with $@ and $* (bug #627)[L339]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS='' with $@ and $* and printf (bug #627)[L351]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS='' with ${a[@]} and ${a[*]} (bug #627)[L363]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS='' with ${!prefix@} and ${!prefix*} (bug #627)[L377]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS='' with ${!a[@]} and ${!a[*]} (bug #627)[L396]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::Bug #628 split on : with : in literal word[L414]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::Bug #1664, \\\\ with noglob[L452]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::Empty IFS bug #2141 (from pnut)[L484]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::4 x 3 table: (default IFS, IFS='', IFS=zx) x ( $* "$*" $@ "$@" )[L542]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::4 x 3 table - with for loop[L623]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS=x and '' and $@ - same bug as spec/toysh-posix case #12[L670]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::IFS=x and '' and $@ (#2)[L714]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::""$A"" - empty string on both sides - derived from spec/toysh-posix #15[L815]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[word-split.test.sh::Regression: "${v:-AxBxC}"x should not be split[L894]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[xtrace.test.sh::set -o verbose prints unevaluated code[L28]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[xtrace.test.sh::xtrace with unprintable chars[L49]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[xtrace.test.sh::xtrace with variables in PS4[L297]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[xtrace.test.sh::PS4 with unterminated ${[L332]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[xtrace.test.sh::PS4 with unterminated $([L349]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[xtrace.test.sh::Reading $? in PS4[L384]]
FAILED tests/spec_tests/test_spec.py::TestBashSpecTests::test_spec_case[zsh-idioms.test.sh::zsh var sub is rejected at runtime[L27]]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[alias.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[append.test.sh] - F...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[arg-parse.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[arith-context.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[arith-dynamic.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[arith.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[array-assign.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[array-assoc.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[array-compat.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[array-literal.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[array-sparse.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[array.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[assign-deferred.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[assign-dialects.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[assign-extended.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[assign.test.sh] - F...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[ble-features.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[ble-unset.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[bool-parse.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[brace-expansion.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[bugs.test.sh] - Fai...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-bash.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-bracket.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-cd.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-completion.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-dirs.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-echo.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-eval-source.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-getopts.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-meta-assign.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-meta.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-misc.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-printf.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-read.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-set.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-special.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-type-bash.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-type.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[builtin-vars.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[case_.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[command-parsing.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[command-sub-ksh.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[command-sub.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[command_.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[dbracket.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[dparen.test.sh] - F...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[empty-bodies.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[errexit.test.sh] - ...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[exit-status.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[extglob-files.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[extglob-match.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[fatal-errors.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[for-expr.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[func-parsing.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[glob-bash.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[glob.test.sh] - Fai...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[globignore.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[here-doc.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[introspect.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[loop.test.sh] - Fai...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[nameref.test.sh] - ...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[nix-idioms.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[nocasematch-match.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[nul-bytes.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[paren-ambiguity.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[parse-errors.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[pipeline.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[posix.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[quote.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[redir-order.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[redirect-command.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[redirect-multi.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[redirect.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[regex.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[serialize.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[sh-func.test.sh] - ...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[sh-options-bash.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[sh-options.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[shell-bugs.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[shell-grammar.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[smoke.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[strict-options.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[temp-binding.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[tilde.test.sh] - Fa...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[type-compat.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-num.test.sh] - ...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-op-bash.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-op-len.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-op-patsub.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-op-slice.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-op-strip.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-op-test.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-ref.test.sh] - ...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-sub-quote.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[var-sub.test.sh] - ...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[vars-bash.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[vars-special.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[whitespace.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[word-eval.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[word-split.test.sh]
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[xtrace.test.sh] - F...
FAILED tests/spec_tests/test_spec.py::test_bash_spec_file[zsh-idioms.test.sh]
FAILED tests/spec_tests/test_spec.py::test_grep_spec_file[busybox-grep.tests]
FAILED tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-bre.tests] - Fa...
FAILED tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-ere.tests] - Fa...
FAILED tests/spec_tests/test_spec.py::test_grep_spec_file[gnu-spencer2.tests]
FAILED tests/spec_tests/test_spec.py::test_sed_spec_file[busybox-sed.tests]
FAILED tests/spec_tests/test_spec.py::test_sed_spec_file[pythonsed-chang.suite]
FAILED tests/spec_tests/test_spec.py::test_sed_spec_file[pythonsed-unit.suite]
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.-f-f] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.argv] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.builtin] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.close] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.clv] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.csconcat] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.exprconv] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.func] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.gawk] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.getline] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.latin1] - UnicodeD...
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.misc] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.overflow] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.recache] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.redir] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.split] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_awk_spec_file[T.system] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_jq_spec_file[base64.test] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_jq_spec_file[jq.test] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_jq_spec_file[man.test] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_jq_spec_file[manonig.test] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_jq_spec_file[onig.test] - Failed: 
FAILED tests/spec_tests/test_spec.py::test_jq_spec_file[optional.test] - Fail...
FAILED tests/spec_tests/test_spec.py::test_jq_spec_file[uri.test] - Failed: 
1258 failed, 1154 passed, 75 skipped, 18 warnings in 108.76s (0:01:48)
