# phasm/phasm/stdlib/types.py
"""
stdlib: Standard types that are not wasm primitives
"""
from typing import Mapping, NamedTuple, Type
from phasm.stdlib import alloc
from phasm.type3.routers import TypeVariableLookup
from phasm.type3.types import IntType3, Type3
from phasm.wasm import (
WasmType,
WasmTypeFloat32,
WasmTypeFloat64,
WasmTypeInt32,
WasmTypeInt64,
WasmTypeNone,
)
from phasm.wasmgenerator import Generator, func_wrapper
from phasm.wasmgenerator import VarType_i32 as i32
from phasm.wasmgenerator import VarType_i64 as i64
TypeInfo = NamedTuple('TypeInfo', [
# Name of the type
('typ', str, ),
# What WebAssembly type to use when passing this value around
# For example in function arguments
('wasm_type', Type[WasmType]),
# What WebAssembly function to use when loading a value from memory
('wasm_load_func', str),
# What WebAssembly function to use when storing a value to memory
('wasm_store_func', str),
# When storing this value in memory, how many bytes do we use?
# Only valid for non-constructed types, see calculate_alloc_size
# Should match wasm_load_func / wasm_store_func
('alloc_size', int),
])
TYPE_INFO_MAP: Mapping[str, TypeInfo] = {
'none': TypeInfo('none', WasmTypeNone, 'nop', 'nop', 0),
'bool': TypeInfo('bool', WasmTypeInt32, 'i32.load8_u', 'i32.store8', 1),
'u8': TypeInfo('u8', WasmTypeInt32, 'i32.load8_u', 'i32.store8', 1),
'i8': TypeInfo('i8', WasmTypeInt32, 'i32.load8_s', 'i32.store8', 1),
'u32': TypeInfo('u32', WasmTypeInt32, 'i32.load', 'i32.store', 4),
'u64': TypeInfo('u64', WasmTypeInt64, 'i64.load', 'i64.store', 8),
'i32': TypeInfo('i32', WasmTypeInt32, 'i32.load', 'i32.store', 4),
'i64': TypeInfo('i64', WasmTypeInt64, 'i64.load', 'i64.store', 8),
'f32': TypeInfo('f32', WasmTypeFloat32, 'f32.load', 'f32.store', 4),
'f64': TypeInfo('f64', WasmTypeFloat64, 'f64.load', 'f64.store', 8),
'ptr': TypeInfo('ptr', WasmTypeInt32, 'i32.load', 'i32.store', 4),
}
# By default, constructed types are passed as pointers
# NOTE: ALLOC SIZE HERE DOES NOT WORK FOR CONSTRUCTED TYPES
# USE runtime.calculate_alloc_size FOR ACCURATE RESULTS
# Functions count as constructed types - even though they are
# not memory pointers but table addresses instead.
TYPE_INFO_CONSTRUCTED = TypeInfo('t a', WasmTypeInt32, 'i32.load', 'i32.store', 4)
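# Illustrative lookup (assumed usage, mirroring the generators below):
#   info = TYPE_INFO_MAP.get('u8', TYPE_INFO_CONSTRUCTED)
#   info.wasm_load_func  # 'i32.load8_u'
#   info.alloc_size      # 1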
@func_wrapper()
def __alloc_bytes__(g: Generator, length: i32) -> i32:
"""
Allocates room for a bytes instance, but does not write
anything to the allocated memory
"""
result = i32('result')
# Allocate the length of the byte string, as well
# as 4 bytes for a length header
g.local.get(length)
g.i32.const(4)
g.i32.add()
g.call(alloc.__alloc__)
    # Store the address in a variable so we can retrieve it
    # after writing the length header
g.local.tee(result)
g.local.get(length)
g.i32.store()
# Get the address back from the variable as return
g.local.get(result)
return i32('return') # To satisfy mypy
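# Layout produced by __alloc_bytes__: a 4-byte u32 length header,
# followed by `length` payload bytes that the caller still has to fill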
@func_wrapper()
def __subscript_bytes__(g: Generator, adr: i32, ofs: i32) -> i32:
"""
Returns an index from a bytes value
If ofs is more than the length of the bytes, this
function stop as unreachable.
adr i32 The pointer for the allocated bytes
ofs i32 The offset within the allocated bytes
"""
g.local.get(ofs)
g.local.get(adr)
g.i32.load()
g.i32.ge_u()
with g.if_():
# The offset is outside the allocated bytes
g.unreachable(comment='Out of bounds')
# The offset is less than the length
g.local.get(adr)
g.i32.const(4) # Bytes header
g.i32.add()
g.local.get(ofs)
g.i32.add()
g.i32.load8_u()
g.return_()
return i32('return') # To satisfy mypy
@func_wrapper()
def __u32_ord_min__(g: Generator, x: i32, y: i32) -> i32:
g.local.get(x)
g.local.get(y)
g.i32.lt_u()
with g.if_():
g.local.get(x)
g.return_()
g.local.get(y)
g.return_()
return i32('return') # To satisfy mypy
@func_wrapper()
def __u64_ord_min__(g: Generator, x: i64, y: i64) -> i64:
g.local.get(x)
g.local.get(y)
g.i64.lt_u()
with g.if_():
g.local.get(x)
g.return_()
g.local.get(y)
g.return_()
return i64('return') # To satisfy mypy
@func_wrapper()
def __i32_ord_min__(g: Generator, x: i32, y: i32) -> i32:
g.local.get(x)
g.local.get(y)
g.i32.lt_s()
with g.if_():
g.local.get(x)
g.return_()
g.local.get(y)
g.return_()
return i32('return') # To satisfy mypy
@func_wrapper()
def __i64_ord_min__(g: Generator, x: i64, y: i64) -> i64:
g.local.get(x)
g.local.get(y)
g.i64.lt_s()
with g.if_():
g.local.get(x)
g.return_()
g.local.get(y)
g.return_()
return i64('return') # To satisfy mypy
@func_wrapper()
def __u32_ord_max__(g: Generator, x: i32, y: i32) -> i32:
g.local.get(x)
g.local.get(y)
g.i32.gt_u()
with g.if_():
g.local.get(x)
g.return_()
g.local.get(y)
g.return_()
return i32('return') # To satisfy mypy
@func_wrapper()
def __u64_ord_max__(g: Generator, x: i64, y: i64) -> i64:
g.local.get(x)
g.local.get(y)
g.i64.gt_u()
with g.if_():
g.local.get(x)
g.return_()
g.local.get(y)
g.return_()
return i64('return') # To satisfy mypy
@func_wrapper()
def __i32_ord_max__(g: Generator, x: i32, y: i32) -> i32:
g.local.get(x)
g.local.get(y)
g.i32.gt_s()
with g.if_():
g.local.get(x)
g.return_()
g.local.get(y)
g.return_()
return i32('return') # To satisfy mypy
@func_wrapper()
def __i64_ord_max__(g: Generator, x: i64, y: i64) -> i64:
g.local.get(x)
g.local.get(y)
g.i64.gt_s()
with g.if_():
g.local.get(x)
g.return_()
g.local.get(y)
g.return_()
return i64('return') # To satisfy mypy
@func_wrapper()
def __i32_intnum_abs__(g: Generator, x: i32) -> i32:
# https://stackoverflow.com/a/14194764
    y = i32('y')
    # y = x >> 31
g.local.get(x)
g.i32.const(31)
g.i32.shr_s() # Must be arithmetic shift
g.local.set(y)
# abs(x) = (x XOR y) - y
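    # Why this works: y is 0 for non-negative x and -1 (all ones) for
    # negative x, so the expression yields either x - 0 == x, or
    # (~x) + 1, which is -x in two's complement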
# (x XOR y)
g.local.get(x)
g.local.get(y)
g.i32.xor()
# - y
g.local.get(y)
g.i32.sub()
g.return_()
return i32('return') # To satisfy mypy
@func_wrapper()
def __i64_intnum_abs__(g: Generator, x: i64) -> i64:
# https://stackoverflow.com/a/14194764
    y = i64('y')
    # y = x >> 63
    g.local.get(x)
    g.i64.const(63)
    g.i64.shr_s() # Must be arithmetic shift
g.local.set(y)
# abs(x) = (x XOR y) - y
# (x XOR y)
g.local.get(x)
g.local.get(y)
g.i64.xor()
# - y
g.local.get(y)
g.i64.sub()
g.return_()
return i64('return') # To satisfy mypy
@func_wrapper()
def __u32_pow2__(g: Generator, x: i32) -> i32:
# 2^0 == 1
g.local.get(x)
g.i32.eqz()
with g.if_():
g.i32.const(1)
g.return_()
    # 2 ^ x == 2 << (x - 1)
    # (when x >= 1)
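    # e.g. x == 3: 2 << 2 == 8 == 2^3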
g.i32.const(2)
g.local.get(x)
g.i32.const(1)
g.i32.sub()
g.i32.shl()
return i32('return') # To satisfy mypy
@func_wrapper()
def __u8_rotl__(g: Generator, x: i32, r: i32) -> i32:
s = i32('s') # The shifted part we need to overlay
    # Handle cases where we rotate by 8 or more bits
g.local.get(r)
g.i32.const(8)
g.i32.rem_u()
g.local.set(r)
# Now do the rotation
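    # u8 lives in an i32: shift left, then OR the bits that left the
    # low byte (now in bits 8-15) back into the bottom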
g.local.get(x)
# 0000 0000 1100 0011
g.local.get(r)
# 0000 0000 1100 0011, 3
g.i32.shl()
# 0000 0110 0001 1000
g.local.tee(s)
# 0000 0110 0001 1000
g.i32.const(255)
# 0000 0110 0001 1000, 0000 0000 1111 1111
g.i32.and_()
# 0000 0000 0001 1000
g.local.get(s)
# 0000 0000 0001 1000, 0000 0110 0001 1000
g.i32.const(65280)
# 0000 0000 0001 1000, 0000 0110 0001 1000, 1111 1111 0000 0000
g.i32.and_()
# 0000 0000 0001 1000, 0000 0110 0000 0000
g.i32.const(8)
# 0000 0000 0001 1000, 0000 0110 0000 0000, 8
g.i32.shr_u()
# 0000 0000 0001 1000, 0000 0000 0000 0110
g.i32.or_()
    # 0000 0000 0001 1110
g.return_()
return i32('return') # To satisfy mypy
@func_wrapper()
def __u8_rotr__(g: Generator, x: i32, r: i32) -> i32:
s = i32('s') # The shifted part we need to overlay
    # Handle cases where we rotate by 8 or more bits
g.local.get(r)
g.i32.const(8)
g.i32.rem_u()
g.local.set(r)
# Now do the rotation
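    # The native 32-bit rotr wraps the low bits to the top of the i32
    # (bits 24-31); shift them back down and OR them into the low byte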
g.local.get(x)
# 0000 0000 1100 0011
g.local.get(r)
# 0000 0000 1100 0011, 3
g.i32.rotr()
# 0110 0000 0000 0000 0000 0000 0001 1000
g.local.tee(s)
# 0110 0000 0000 0000 0000 0000 0001 1000
g.i32.const(255)
# 0110 0000 0000 0000 0000 0000 0001 1000, 0000 0000 1111 1111
g.i32.and_()
# 0000 0000 0000 0000 0000 0000 0001 1000
g.local.get(s)
# 0000 0000 0000 0000 0000 0000 0001 1000, 0110 0000 0000 0000 0000 0000 0001 1000
g.i32.const(4278190080)
# 0000 0000 0000 0000 0000 0000 0001 1000, 0110 0000 0000 0000 0000 0000 0001 1000, 1111 1111 0000 0000 0000 0000 0000 0000
g.i32.and_()
# 0000 0000 0000 0000 0000 0000 0001 1000, 0110 0000 0000 0000 0000 0000 0000 0000
g.i32.const(24)
# 0000 0000 0000 0000 0000 0000 0001 1000, 0110 0000 0000 0000 0000 0000 0000 0000, 24
g.i32.shr_u()
# 0000 0000 0000 0000 0000 0000 0001 1000, 0000 0000 0000 0000 0000 0000 0110 0000
g.i32.or_()
# 0000 0000 0000 0000 0000 0000 0111 1000
g.return_()
return i32('return') # To satisfy mypy
## ###
## class Eq
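# u8/i8 values are stored in an i32 (see TYPE_INFO_MAP), so the 32-bit
# comparison instructions apply to them directly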
def u8_eq_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.eq()
def u32_eq_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.eq()
def u64_eq_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.eq()
def i8_eq_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.eq()
def i32_eq_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.eq()
def i64_eq_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.eq()
def f32_eq_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.eq()
def f64_eq_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.eq()
def u8_eq_not_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.ne()
def u32_eq_not_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.ne()
def u64_eq_not_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.ne()
def i8_eq_not_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.ne()
def i32_eq_not_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.ne()
def i64_eq_not_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.ne()
def f32_eq_not_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.ne()
def f64_eq_not_equals(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.ne()
## ###
## class Ord
def u8_ord_min(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u32_ord_min__')
def u32_ord_min(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u32_ord_min__')
def u64_ord_min(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u64_ord_min__')
def i8_ord_min(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__i32_ord_min__')
def i32_ord_min(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__i32_ord_min__')
def i64_ord_min(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__i64_ord_min__')
def f32_ord_min(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.min()
def f64_ord_min(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.min()
def u8_ord_max(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u32_ord_max__')
def u32_ord_max(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u32_ord_max__')
def u64_ord_max(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u64_ord_max__')
def i8_ord_max(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__i32_ord_max__')
def i32_ord_max(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__i32_ord_max__')
def i64_ord_max(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__i64_ord_max__')
def f32_ord_max(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.max()
def f64_ord_max(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.max()
def u8_ord_less_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.lt_u()
def u32_ord_less_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.lt_u()
def u64_ord_less_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.lt_u()
def i8_ord_less_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.lt_s()
def i32_ord_less_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.lt_s()
def i64_ord_less_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.lt_s()
def f32_ord_less_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.lt()
def f64_ord_less_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.lt()
def u8_ord_less_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.le_u()
def u32_ord_less_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.le_u()
def u64_ord_less_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.le_u()
def i8_ord_less_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.le_s()
def i32_ord_less_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.le_s()
def i64_ord_less_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.le_s()
def f32_ord_less_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.le()
def f64_ord_less_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.le()
def u8_ord_greater_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.gt_u()
def u32_ord_greater_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.gt_u()
def u64_ord_greater_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.gt_u()
def i8_ord_greater_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.gt_s()
def i32_ord_greater_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.gt_s()
def i64_ord_greater_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.gt_s()
def f32_ord_greater_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.gt()
def f64_ord_greater_than(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.gt()
def u8_ord_greater_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.ge_u()
def u32_ord_greater_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.ge_u()
def u64_ord_greater_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.ge_u()
def i8_ord_greater_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.ge_s()
def i32_ord_greater_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.ge_s()
def i64_ord_greater_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.ge_s()
def f32_ord_greater_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.ge()
def f64_ord_greater_than_or_equal(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.ge()
## ###
## class Bits
def u8_bits_logical_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.shl()
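    # Mask back to 8 bits, since u8 values are stored in an i32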
g.i32.const(255)
g.i32.and_()
def u32_bits_logical_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.shl()
def u64_bits_logical_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
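    # The shift amount arrives as an i32; widen it so both shl operands are i64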
g.i64.extend_i32_u()
g.i64.shl()
def u8_bits_logical_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.shr_u()
def u32_bits_logical_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.shr_u()
def u64_bits_logical_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
g.i64.shr_u()
def u8_bits_rotate_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u8_rotl__')
def u32_bits_rotate_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.rotl()
def u64_bits_rotate_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
g.i64.rotl()
def u8_bits_rotate_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u8_rotr__')
def u32_bits_rotate_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.rotr()
def u64_bits_rotate_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
g.i64.rotr()
def u8_bits_bitwise_and(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.and_()
def u32_bits_bitwise_and(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.and_()
def u64_bits_bitwise_and(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.and_()
def u8_bits_bitwise_or(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.or_()
def u32_bits_bitwise_or(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.or_()
def u64_bits_bitwise_or(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.or_()
def u8_bits_bitwise_xor(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.xor()
def u32_bits_bitwise_xor(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.xor()
def u64_bits_bitwise_xor(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.xor()
## ###
## class Fractional
def f32_fractional_ceil(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.ceil()
def f64_fractional_ceil(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.ceil()
def f32_fractional_floor(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.floor()
def f64_fractional_floor(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.floor()
def f32_fractional_trunc(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.trunc()
def f64_fractional_trunc(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.trunc()
def f32_fractional_nearest(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.nearest()
def f64_fractional_nearest(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.nearest()
def f32_fractional_div(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.div()
def f64_fractional_div(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.div()
## ###
## class Floating
def f32_floating_sqrt(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('f32.sqrt')
def f64_floating_sqrt(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('f64.sqrt')
## ###
## class Integral
def u32_integral_div(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.div_u')
def u64_integral_div(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.div_u')
def i32_integral_div(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.div_s')
def i64_integral_div(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.div_s')
def u32_integral_rem(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.rem_u')
def u64_integral_rem(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.rem_u')
def i32_integral_rem(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.rem_s')
def i64_integral_rem(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.rem_s')
## ###
## class NatNum
def u32_natnum_add(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.add')
def u64_natnum_add(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.add')
def i32_natnum_add(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.add')
def i64_natnum_add(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.add')
def f32_natnum_add(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('f32.add')
def f64_natnum_add(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('f64.add')
def u32_natnum_sub(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.sub')
def u64_natnum_sub(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.sub')
def i32_natnum_sub(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.sub')
def i64_natnum_sub(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.sub')
def f32_natnum_sub(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('f32.sub')
def f64_natnum_sub(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('f64.sub')
def u32_natnum_mul(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.mul')
def u64_natnum_mul(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.mul')
def i32_natnum_mul(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i32.mul')
def i64_natnum_mul(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('i64.mul')
def f32_natnum_mul(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('f32.mul')
def f64_natnum_mul(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('f64.mul')
def u32_natnum_arithmic_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.shl()
def u64_natnum_arithmic_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
g.i64.shl()
def i32_natnum_arithmic_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.shl()
def i64_natnum_arithmic_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
g.i64.shl()
def f32_natnum_arithmic_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
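    # For floats, shifting left by n is implemented as multiplying by 2^n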
g.add_statement('call $stdlib.types.__u32_pow2__')
g.f32.convert_i32_u()
g.f32.mul()
def f64_natnum_arithmic_shift_left(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u32_pow2__')
g.f64.convert_i32_u()
g.f64.mul()
def u32_natnum_arithmic_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.shr_u()
def u64_natnum_arithmic_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
g.i64.shr_u()
def i32_natnum_arithmic_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.shr_s()
def i64_natnum_arithmic_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
g.i64.shr_s()
def f32_natnum_arithmic_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
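    # For floats, shifting right by n is implemented as dividing by 2^n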
g.add_statement('call $stdlib.types.__u32_pow2__')
g.f32.convert_i32_u()
g.f32.div()
def f64_natnum_arithmic_shift_right(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__u32_pow2__')
g.f64.convert_i32_u()
g.f64.div()
## ###
## class IntNum
def i32_intnum_abs(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__i32_intnum_abs__')
def i64_intnum_abs(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.add_statement('call $stdlib.types.__i64_intnum_abs__')
def f32_intnum_abs(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.abs()
def f64_intnum_abs(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.abs()
def i32_intnum_neg(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.const(-1)
g.i32.mul()
def i64_intnum_neg(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.const(-1)
g.i64.mul()
def f32_intnum_neg(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.neg()
def f64_intnum_neg(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.neg()
## ###
## class Sized
def dynamic_array_sized_len(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
# The length is stored in the first 4 bytes
g.i32.load()
def static_array_sized_len(g: Generator, tvl: TypeVariableLookup) -> None:
tv_map, tc_map = tvl
tvn_map = {
x.name: y
for x, y in tv_map.items()
}
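    # 'a*' is the type variable carrying the static array's length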
sa_len = tvn_map['a*']
assert isinstance(sa_len, IntType3)
g.i32.const(sa_len.value)
## ###
## Extendable
def u8_u32_extend(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
# No-op
# u8 is already stored as u32
pass
def u8_u64_extend(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
def u32_u64_extend(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_u()
def i8_i32_extend(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
# No-op
# i8 is already stored as i32
pass
def i8_i64_extend(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_s()
def i32_i64_extend(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i64.extend_i32_s()
def u8_u32_wrap(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.const(0xFF)
g.i32.and_()
def u8_u64_wrap(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.wrap_i64()
g.i32.const(0xFF)
g.i32.and_()
def u32_u64_wrap(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.wrap_i64()
def i8_i32_wrap(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.const(0xFF)
g.i32.and_()
def i8_i64_wrap(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.wrap_i64()
def i32_i64_wrap(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.i32.wrap_i64()
## ###
## Promotable
def f32_f64_promote(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f64.promote_f32()
def f32_f64_demote(g: Generator, tv_map: TypeVariableLookup) -> None:
del tv_map
g.f32.demote_f64()
## ###
## Foldable
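# The generators below emit inline WASM loops. foldl applies the
# function left-to-right, foldr right-to-left:
#   foldl f b [x0, x1, .., xn] == f(f(f(b, x0), x1), .., xn)
#   foldr f b [x0, x1, .., xn] == f(x0, f(x1, .. f(xn, b)))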
def dynamic_array_sum(g: Generator, tvl: TypeVariableLookup) -> None:
tv_map, tc_map = tvl
tvn_map = {
x.name: y
for x, y in tv_map.items()
}
sa_type = tvn_map['a']
assert isinstance(sa_type, Type3)
sa_type_info = TYPE_INFO_MAP.get(sa_type.name, TYPE_INFO_CONSTRUCTED)
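    # Dynamic array layout: a 4-byte u32 length header followed by the
    # packed elements, each alloc_size bytes wide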
# FIXME: This breaks when users start implementing their own NatNum classes
type_var_add_generator = {
'u32': u32_natnum_add,
'u64': u64_natnum_add,
'i32': i32_natnum_add,
'i64': i64_natnum_add,
'f32': f32_natnum_add,
'f64': f64_natnum_add,
}
sa_type_add_gen = type_var_add_generator[sa_type.name]
# Definitions
sum_adr = g.temp_var(i32('sum_adr'))
sum_stop = g.temp_var(i32('sum_stop'))
with g.block(params=['i32'], result=sa_type_info.wasm_type):
        # Stack: [adr] -> [] ; sum_adr=adr
g.local.set(sum_adr)
# Stack: [] ; sum_stop = adr + 4 + len(adr) * sa_type_info.alloc_size
g.nop(comment='Calculate address at which to stop looping')
g.local.get(sum_adr)
g.i32.load()
g.i32.const(sa_type_info.alloc_size)
g.i32.mul()
g.local.get(sum_adr)
g.i32.add()
g.i32.const(4)
g.i32.add()
g.local.set(sum_stop)
# Stack: [] -> [sum] ; sum_adr += 4
g.nop(comment='Get the first array value as starting point')
g.local.get(sum_adr)
g.i32.const(4)
g.i32.add()
g.local.tee(sum_adr)
g.add_statement(sa_type_info.wasm_load_func)
# Since we did the first one, increase adr
# Stack: [sum] -> [sum] ; sum_adr = sum_adr + sa_type_info.alloc_size
g.local.get(sum_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.add()
g.local.set(sum_adr)
g.local.get(sum_adr)
g.local.get(sum_stop)
g.i32.lt_u()
with g.if_(params=[sa_type_info.wasm_type], result=sa_type_info.wasm_type):
with g.loop(params=[sa_type_info.wasm_type], result=sa_type_info.wasm_type):
# sum = sum + *adr
# Stack: [sum] -> [sum + *adr]
g.nop(comment='Add array value')
g.local.get(sum_adr)
g.add_statement(sa_type_info.wasm_load_func)
sa_type_add_gen(g, ({}, {}, ))
# adr = adr + sa_type_info.alloc_size
# Stack: [sum] -> [sum]
g.nop(comment='Calculate address of the next value')
g.local.get(sum_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.add()
g.local.tee(sum_adr)
# loop if adr < stop
g.nop(comment='Check if address exceeds array bounds')
g.local.get(sum_stop)
g.i32.lt_u()
g.br_if(0)
    # else: the sum of a single-element array is that element itself,
    # so no loop is needed
    # End result: [sum]
def static_array_sum(g: Generator, tvl: TypeVariableLookup) -> None:
tv_map, tc_map = tvl
tvn_map = {
x.name: y
for x, y in tv_map.items()
}
sa_type = tvn_map['a']
sa_len = tvn_map['a*']
assert isinstance(sa_type, Type3)
assert isinstance(sa_len, IntType3)
    if sa_len.value < 1:
        raise NotImplementedError('No default value for the sum of an empty array')
sa_type_info = TYPE_INFO_MAP.get(sa_type.name, TYPE_INFO_CONSTRUCTED)
# FIXME: This breaks when users start implementing their own NatNum classes
type_var_add_generator = {
'u32': u32_natnum_add,
'u64': u64_natnum_add,
'i32': i32_natnum_add,
'i64': i64_natnum_add,
'f32': f32_natnum_add,
'f64': f64_natnum_add,
}
sa_type_add_gen = type_var_add_generator[sa_type.name]
# Definitions
sum_adr = g.temp_var(i32('sum_adr'))
sum_stop = g.temp_var(i32('sum_stop'))
# Stack before: [adr]
# Stack after: [sum]
# adr = {address of what's currently on stack}
# Stack: [adr] -> []
g.nop(comment=f'Start sum for {sa_type.name}[{sa_len.value}]')
g.local.set(sum_adr)
# stop = adr + ar_len * sa_type_info.alloc_size
# Stack: []
g.nop(comment='Calculate address at which to stop looping')
g.local.get(sum_adr)
g.i32.const(sa_len.value * sa_type_info.alloc_size)
g.i32.add()
g.local.set(sum_stop)
# sum = *adr
# Stack: [] -> [sum]
g.nop(comment='Get the first array value as starting point')
g.local.get(sum_adr)
g.add_statement(sa_type_info.wasm_load_func)
# Since we did the first one, increase adr
# adr = adr + sa_type_info.alloc_size
# Stack: [sum] -> [sum]
g.local.get(sum_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.add()
g.local.set(sum_adr)
if sa_len.value > 1:
with g.loop(params=[sa_type_info.wasm_type], result=sa_type_info.wasm_type):
# sum = sum + *adr
# Stack: [sum] -> [sum + *adr]
g.nop(comment='Add array value')
g.local.get(sum_adr)
g.add_statement(sa_type_info.wasm_load_func)
sa_type_add_gen(g, ({}, {}, ))
# adr = adr + sa_type_info.alloc_size
# Stack: [sum] -> [sum]
g.nop(comment='Calculate address of the next value')
g.local.get(sum_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.add()
g.local.tee(sum_adr)
# loop if adr < stop
g.nop(comment='Check if address exceeds array bounds')
g.local.get(sum_stop)
g.i32.lt_u()
g.br_if(0)
    # else: the sum of a single-element array is that element itself,
    # so no loop is needed
g.nop(comment=f'Completed sum for {sa_type.name}[{sa_len.value}]')
# End result: [sum]
def dynamic_array_foldl(g: Generator, tvl: TypeVariableLookup) -> None:
tv_map, tc_map = tvl
tvn_map = {
x.name: y
for x, y in tv_map.items()
}
sa_type = tvn_map['a']
res_type = tvn_map['b']
assert isinstance(sa_type, Type3)
assert isinstance(res_type, Type3)
sa_type_info = TYPE_INFO_MAP.get(sa_type.name, TYPE_INFO_CONSTRUCTED)
res_type_info = TYPE_INFO_MAP.get(res_type.name, TYPE_INFO_CONSTRUCTED)
# Definitions
fold_adr = g.temp_var(i32('fold_adr'))
fold_stop = g.temp_var(i32('fold_stop'))
fold_init = g.temp_var_t(res_type_info.wasm_type, 'fold_init')
fold_func = g.temp_var(i32('fold_func'))
fold_len = g.temp_var(i32('fold_len'))
with g.block(params=['i32', res_type_info.wasm_type, 'i32'], result=res_type_info.wasm_type, comment=f'foldl a={sa_type.name} b={res_type.name}'):
# Stack: [fn*, b, sa*] -> [fn*, b]
g.local.tee(fold_adr) # Store address, but also keep it for loading the length
g.i32.load() # Load the length
g.local.set(fold_len) # Store the length
# Stack: [fn*, b] -> [fn*]
g.local.set(fold_init)
# Stack: [fn*] -> []
g.local.set(fold_func)
# Stack: [] -> [b]
g.nop(comment='No applications if array is empty')
g.local.get(fold_init)
g.local.get(fold_len)
g.i32.eqz() # If the array is empty
g.br_if(0) # Then the base value is the result
# Stack: [b] -> [b] ; fold_adr=fold_adr + 4
g.nop(comment='Skip the header')
g.local.get(fold_adr)
g.i32.const(4)
g.i32.add()
g.local.set(fold_adr)
# Stack: [b] -> [b]
g.nop(comment='Apply the first function call')
g.local.get(fold_adr)
g.add_statement(sa_type_info.wasm_load_func)
g.local.get(fold_func)
g.call_indirect([res_type_info.wasm_type, sa_type_info.wasm_type], res_type_info.wasm_type)
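        # fold_func holds the table index of the folded function, whose
        # signature is (b, a) -> b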
# Stack: [b] -> [b]
g.nop(comment='No loop if there is only one item')
g.local.get(fold_len)
g.i32.const(1)
g.i32.eq()
g.br_if(0) # just one value, don't need to loop
        # Stack: [b] -> [b] ; fold_stop = fold_adr + fold_len * sa_type_info.alloc_size
g.nop(comment='Calculate address at which to stop looping')
g.local.get(fold_adr)
g.local.get(fold_len)
g.i32.const(sa_type_info.alloc_size)
g.i32.mul()
g.i32.add()
g.local.set(fold_stop)
# Stack: [b] -> [b] ; fold_adr = fold_adr + sa_type_info.alloc_size
g.nop(comment='Calculate address of the next value')
g.local.get(fold_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.add()
g.local.set(fold_adr)
with g.loop(params=[res_type_info.wasm_type], result=res_type_info.wasm_type):
# Stack: [b] -> [b]
g.nop(comment='Apply function call')
g.local.get(fold_adr)
g.add_statement(sa_type_info.wasm_load_func)
g.local.get(fold_func)
g.call_indirect([res_type_info.wasm_type, sa_type_info.wasm_type], res_type_info.wasm_type)
# Stack: [b] -> [b] ; fold_adr = fold_adr + sa_type_info.alloc_size
g.nop(comment='Calculate address of the next value')
g.local.get(fold_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.add()
g.local.tee(fold_adr)
            # loop if adr < stop
# Stack: [b] -> [b]
g.nop(comment='Check if address exceeds array bounds')
g.local.get(fold_stop)
g.i32.lt_u()
g.br_if(0)
# Stack: [b]
def static_array_foldl(g: Generator, tvl: TypeVariableLookup) -> None:
tv_map, tc_map = tvl
tvn_map = {
x.name: y
for x, y in tv_map.items()
}
sa_type = tvn_map['a']
sa_len = tvn_map['a*']
res_type = tvn_map['b']
assert isinstance(sa_type, Type3)
assert isinstance(sa_len, IntType3)
assert isinstance(res_type, Type3)
sa_type_info = TYPE_INFO_MAP.get(sa_type.name, TYPE_INFO_CONSTRUCTED)
res_type_info = TYPE_INFO_MAP.get(res_type.name, TYPE_INFO_CONSTRUCTED)
# Definitions
fold_adr = g.temp_var(i32('fold_adr'))
fold_stop = g.temp_var(i32('fold_stop'))
fold_init = g.temp_var_t(res_type_info.wasm_type, 'fold_init')
fold_func = g.temp_var(i32('fold_func'))
with g.block(params=['i32', res_type_info.wasm_type, 'i32'], result=res_type_info.wasm_type, comment=f'foldl a={sa_type.name} a*={sa_len.value} b={res_type.name}'):
# Stack: [fn*, b, sa*] -> [fn*, b]
g.local.set(fold_adr)
# Stack: [fn*, b] -> [fn*]
g.local.set(fold_init)
# Stack: [fn*] -> []
g.local.set(fold_func)
if sa_len.value < 1:
g.local.get(fold_init)
return
# Stack: [] -> [b]
g.nop(comment='Apply the first function call')
g.local.get(fold_init)
g.local.get(fold_adr)
g.add_statement(sa_type_info.wasm_load_func)
g.local.get(fold_func)
g.call_indirect([res_type_info.wasm_type, sa_type_info.wasm_type], res_type_info.wasm_type)
if sa_len.value > 1:
# Stack: [b] -> [b] ; fold_stop=fold_adr + (sa_len.value * sa_type_info.alloc_size)
g.nop(comment='Calculate address at which to stop looping')
g.local.get(fold_adr)
g.i32.const(sa_len.value * sa_type_info.alloc_size)
g.i32.add()
g.local.set(fold_stop)
# Stack: [b] -> [b] ; fold_adr = fold_adr + sa_type_info.alloc_size
g.nop(comment='Calculate address of the next value')
g.local.get(fold_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.add()
g.local.set(fold_adr)
with g.loop(params=[res_type_info.wasm_type], result=res_type_info.wasm_type):
# Stack: [b] -> [b]
g.nop(comment='Apply function call')
g.local.get(fold_adr)
g.add_statement(sa_type_info.wasm_load_func)
g.local.get(fold_func)
g.call_indirect([res_type_info.wasm_type, sa_type_info.wasm_type], res_type_info.wasm_type)
# Stack: [b] -> [b] ; fold_adr = fold_adr + sa_type_info.alloc_size
g.nop(comment='Calculate address of the next value')
g.local.get(fold_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.add()
g.local.tee(fold_adr)
                # loop if adr < stop
# Stack: [b] -> [b]
g.nop(comment='Check if address exceeds array bounds')
g.local.get(fold_stop)
g.i32.lt_u()
g.br_if(0)
# else: just one value, don't need to loop
# Stack: [b]
def dynamic_array_foldr(g: Generator, tvl: TypeVariableLookup) -> None:
tv_map, tc_map = tvl
tvn_map = {
x.name: y
for x, y in tv_map.items()
}
sa_type = tvn_map['a']
res_type = tvn_map['b']
assert isinstance(sa_type, Type3)
assert isinstance(res_type, Type3)
sa_type_info = TYPE_INFO_MAP.get(sa_type.name, TYPE_INFO_CONSTRUCTED)
res_type_info = TYPE_INFO_MAP.get(res_type.name, TYPE_INFO_CONSTRUCTED)
# Definitions
fold_adr = g.temp_var(i32('fold_adr'))
fold_stop = g.temp_var(i32('fold_stop'))
fold_tmp = g.temp_var_t(res_type_info.wasm_type, 'fold_tmp')
fold_func = g.temp_var(i32('fold_func'))
fold_len = g.temp_var(i32('fold_len'))
with g.block(params=['i32', res_type_info.wasm_type, 'i32'], result=res_type_info.wasm_type, comment=f'foldr a={sa_type.name} b={res_type.name}'):
        # Stack: [fn*, b, sa*] -> [fn*, b] ; fold_adr=sa*, fold_tmp=b, fold_func=fn*, fold_len=*sa
g.local.tee(fold_adr) # Store address, but also keep it for loading the length
g.i32.load() # Load the length
g.local.set(fold_len) # Store the length
# Stack: [fn*, b] -> [fn*]
g.local.set(fold_tmp)
# Stack: [fn*] -> []
g.local.set(fold_func)
# Stack: [] -> []
g.nop(comment='No applications if array is empty')
g.local.get(fold_tmp)
g.local.get(fold_len)
g.i32.eqz() # If the array is empty
g.br_if(0) # Then the base value is the result
g.drop() # Else drop the value for now
        # Stack: [] -> [] ; fold_adr = fold_adr + 4
g.nop(comment='Skip the header')
g.local.get(fold_adr)
g.i32.const(4)
g.i32.add()
g.local.set(fold_adr)
# Stack: [] -> [] ; fold_stop=fold_adr
g.nop(comment='Calculate address at which to stop looping')
g.local.get(fold_adr)
g.local.set(fold_stop)
        # Stack: [] -> [] ; fold_adr = fold_adr + (fold_len - 1) * sa_type_info.alloc_size
g.nop(comment='Calculate address at which to start looping')
g.local.get(fold_adr)
g.local.get(fold_len)
g.i32.const(1)
g.i32.sub()
g.i32.const(sa_type_info.alloc_size)
g.i32.mul()
g.i32.add()
g.local.set(fold_adr)
# Stack: [] -> [b]
g.nop(comment='Apply the first function call')
g.local.get(fold_adr)
g.add_statement(sa_type_info.wasm_load_func)
g.local.get(fold_tmp)
g.local.get(fold_func)
g.call_indirect([sa_type_info.wasm_type, res_type_info.wasm_type], res_type_info.wasm_type)
# Stack: [b] -> [b]
g.nop(comment='Check if more than one entry')
g.local.get(fold_len)
g.i32.const(1)
        g.i32.eq() # If the array has only one item
        g.br_if(0) # Then the first application is sufficient
# Stack: [b] -> [b] ; fold_adr = fold_adr - sa_type_info.alloc_size
g.nop(comment='Calculate address of the next value')
g.local.get(fold_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.sub()
g.local.set(fold_adr)
with g.loop(params=[res_type_info.wasm_type], result=res_type_info.wasm_type):
g.nop(comment='Apply function call')
            # WASM has no opcode to swap the top two stack values, so we
            # stage b in fold_tmp to get the (a, b) argument order
# Stack: [b] -> []
g.local.set(fold_tmp)
# Stack: [] -> [a]
g.local.get(fold_adr)
g.add_statement(sa_type_info.wasm_load_func)
# Stack [a] -> [a, b]
g.local.get(fold_tmp)
# Stack [a, b] -> [b]
g.local.get(fold_func)
g.call_indirect([sa_type_info.wasm_type, res_type_info.wasm_type], res_type_info.wasm_type)
# Stack: [b] -> [b] ; fold_adr = fold_adr - sa_type_info.alloc_size
g.nop(comment='Calculate address of the next value')
g.local.get(fold_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.sub()
g.local.tee(fold_adr)
# loop if adr >= stop
# Stack: [b] -> [b]
g.nop(comment='Check if address exceeds array bounds')
g.local.get(fold_stop)
g.i32.ge_u()
g.br_if(0)
# Stack: [b]
def static_array_foldr(g: Generator, tvl: TypeVariableLookup) -> None:
tv_map, tc_map = tvl
tvn_map = {
x.name: y
for x, y in tv_map.items()
}
sa_type = tvn_map['a']
sa_len = tvn_map['a*']
res_type = tvn_map['b']
assert isinstance(sa_type, Type3)
assert isinstance(sa_len, IntType3)
assert isinstance(res_type, Type3)
sa_type_info = TYPE_INFO_MAP.get(sa_type.name, TYPE_INFO_CONSTRUCTED)
res_type_info = TYPE_INFO_MAP.get(res_type.name, TYPE_INFO_CONSTRUCTED)
# Definitions
fold_adr = g.temp_var(i32('fold_adr'))
fold_stop = g.temp_var(i32('fold_stop'))
fold_tmp = g.temp_var_t(res_type_info.wasm_type, 'fold_tmp')
fold_func = g.temp_var(i32('fold_func'))
with g.block(params=['i32', res_type_info.wasm_type, 'i32'], result=res_type_info.wasm_type, comment=f'foldr a={sa_type.name} a*={sa_len.value} b={res_type.name}'):
        # Stack: [fn*, b, sa*] -> [fn*, b] ; fold_adr=sa*, fold_tmp=b, fold_func=fn*
g.local.set(fold_adr)
# Stack: [fn*, b] -> [fn*]
g.local.set(fold_tmp)
# Stack: [fn*] -> []
g.local.set(fold_func)
if sa_len.value < 1:
g.local.get(fold_tmp)
return
# Stack: [] -> [] ; fold_stop=fold_adr
g.nop(comment='Calculate address at which to stop looping')
g.local.get(fold_adr)
g.local.set(fold_stop)
# Stack: [] -> [] ; fold_adr=fold_adr + (sa_len.value - 1) * sa_type_info.alloc_size
g.nop(comment='Calculate address at which to start looping')
g.local.get(fold_adr)
g.i32.const((sa_len.value - 1) * sa_type_info.alloc_size)
g.i32.add()
g.local.set(fold_adr)
# Stack: [] -> [b]
        g.nop(comment='Apply the first function call')
g.local.get(fold_adr)
g.add_statement(sa_type_info.wasm_load_func)
g.local.get(fold_tmp)
g.local.get(fold_func)
g.call_indirect([sa_type_info.wasm_type, res_type_info.wasm_type], res_type_info.wasm_type)
if sa_len.value > 1:
# Stack: [b] -> [b] ; fold_adr = fold_adr - sa_type_info.alloc_size
g.nop(comment='Calculate address of the next value')
g.local.get(fold_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.sub()
g.local.set(fold_adr)
with g.loop(params=[res_type_info.wasm_type], result=res_type_info.wasm_type):
g.nop(comment='Apply function call')
                # WASM has no opcode to swap the top two stack values, so we
                # stage b in fold_tmp to get the (a, b) argument order
# Stack: [b] -> []
g.local.set(fold_tmp)
# Stack: [] -> [a]
g.local.get(fold_adr)
g.add_statement(sa_type_info.wasm_load_func)
# Stack [a] -> [a, b]
g.local.get(fold_tmp)
# Stack [a, b] -> [b]
g.local.get(fold_func)
g.call_indirect([sa_type_info.wasm_type, res_type_info.wasm_type], res_type_info.wasm_type)
# Stack: [b] -> [b] ; fold_adr = fold_adr - sa_type_info.alloc_size
g.nop(comment='Calculate address of the next value')
g.local.get(fold_adr)
g.i32.const(sa_type_info.alloc_size)
g.i32.sub()
g.local.tee(fold_adr)
# loop if adr >= stop
# Stack: [b] -> [b]
g.nop(comment='Check if address exceeds array bounds')
g.local.get(fold_stop)
g.i32.ge_u()
g.br_if(0)
# else: just one value, don't need to loop
# Stack: [b]