summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--.lvimrc1
-rw-r--r--.travis.yml12
-rw-r--r--Makefile538
-rw-r--r--Notes/BigChanges/Variant.md70
-rw-r--r--Notes/BorrowChecker.md7
-rw-r--r--Notes/MIR-Match.md19
-rw-r--r--Notes/MIR-Validation.txt9
-rw-r--r--Notes/todo.txt6
-rw-r--r--README.md36
-rw-r--r--run_rustc/Makefile31
-rw-r--r--rust_src.patch26
-rw-r--r--samples/test/for_underscore_drop.rs18
-rw-r--r--samples/test/scoping_rules.rs21
-rw-r--r--src/ast/crate.hpp21
-rw-r--r--src/ast/expr.cpp26
-rw-r--r--src/common.hpp123
-rw-r--r--src/expand/derive.cpp49
-rw-r--r--src/expand/file_line.cpp10
-rw-r--r--src/expand/format_args.cpp124
-rw-r--r--src/expand/lang_item.cpp5
-rw-r--r--src/expand/mod.cpp146
-rw-r--r--src/expand/stringify.cpp3
-rw-r--r--src/expand/test.cpp80
-rw-r--r--src/expand/test_harness.cpp120
-rw-r--r--src/hir/deserialise.cpp6
-rw-r--r--src/hir/dump.cpp40
-rw-r--r--src/hir/expr.cpp2
-rw-r--r--src/hir/expr.hpp9
-rw-r--r--src/hir/from_ast_expr.cpp12
-rw-r--r--src/hir/hir.cpp24
-rw-r--r--src/hir/hir.hpp6
-rw-r--r--src/hir/serialise.cpp6
-rw-r--r--src/hir/type.cpp21
-rw-r--r--src/hir_conv/constant_evaluation.cpp4
-rw-r--r--src/hir_conv/markings.cpp30
-rw-r--r--src/hir_expand/annotate_value_usage.cpp5
-rw-r--r--src/hir_expand/reborrow.cpp4
-rw-r--r--src/hir_expand/ufcs_everything.cpp4
-rw-r--r--src/hir_typeck/expr_check.cpp5
-rw-r--r--src/hir_typeck/expr_cs.cpp200
-rw-r--r--src/hir_typeck/helpers.cpp494
-rw-r--r--src/hir_typeck/helpers.hpp8
-rw-r--r--src/hir_typeck/static.cpp237
-rw-r--r--src/hir_typeck/static.hpp3
-rw-r--r--src/include/main_bindings.hpp1
-rw-r--r--src/main.cpp108
-rw-r--r--src/mir/check.cpp67
-rw-r--r--src/mir/check_full.cpp981
-rw-r--r--src/mir/cleanup.cpp2
-rw-r--r--src/mir/dump.cpp49
-rw-r--r--src/mir/from_hir.cpp327
-rw-r--r--src/mir/from_hir.hpp61
-rw-r--r--src/mir/from_hir_match.cpp3074
-rw-r--r--src/mir/helpers.cpp1301
-rw-r--r--src/mir/helpers.hpp79
-rw-r--r--src/mir/main_bindings.hpp1
-rw-r--r--src/mir/mir.cpp89
-rw-r--r--src/mir/mir.hpp17
-rw-r--r--src/mir/mir_builder.cpp1408
-rw-r--r--src/mir/operations.hpp2
-rw-r--r--src/mir/optimise.cpp796
-rw-r--r--src/parse/expr.cpp13
-rw-r--r--src/parse/lex.cpp16
-rw-r--r--src/parse/root.cpp9
-rw-r--r--src/parse/token.cpp10
-rw-r--r--src/trans/codegen.cpp3
-rw-r--r--src/trans/codegen_c.cpp247
-rw-r--r--src/trans/enumerate.cpp10
-rw-r--r--src/trans/main_bindings.hpp1
-rw-r--r--src/trans/monomorphise.cpp21
-rw-r--r--src/trans/target.cpp134
-rw-r--r--src/trans/target.hpp15
73 files changed, 8276 insertions, 3189 deletions
diff --git a/.gitignore b/.gitignore
index e85b1a2c..1468026d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,8 @@
/vsproject/*.depend
/vsproject/*.opendb
+/run_rustc/output
+
/gitstats
/bnf/*.o
diff --git a/.lvimrc b/.lvimrc
index 7111978f..641d9423 100644
--- a/.lvimrc
+++ b/.lvimrc
@@ -1,6 +1,7 @@
set expandtab
set sts=4 ts=4 sw=4
set list
+set listchars=eol:$,tab:>-
highlight ExtraWhitespace ctermbg=darkgreen guibg=darkgreen
autocmd Syntax * syn match ExtraWhitespace /\s\+$\| \+\ze\t/
diff --git a/.travis.yml b/.travis.yml
index c9fd9118..d38cd4b5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,20 +6,22 @@ addons:
apt:
sources:
- ubuntu-toolchain-r-test
+ - george-edison55-precise-backports
packages:
- g++-6
- zlib1g-dev
+ - cmake cmake-data
install:
# Build mrustc
- make RUSTCSRC
- CC=gcc-6 CXX=g++-6 make
-# Tests! (check that they parse, and keep going)
script:
-# libstd
+# libstd and hello_world
- CC=gcc-6 make test TAIL_COUNT=2
-# rustc (not the actual binary, because that doesn't emit a file yet)
- - CC=gcc-6 make output/librustc_driver.hir TAIL_COUNT=15
+# rustc
+# - CC=gcc-6 make output/rustc TAIL_COUNT=15
# Tests
-# - CC=gcc-6 make rust_tests-run-pass RUST_TESTS_FINAL_STAGE=expand -k
+ - CC=gcc-6 make local_tests -k
+ - CC=gcc-6 make rust_tests -k
diff --git a/Makefile b/Makefile
index a5c3edf4..c7b425cb 100644
--- a/Makefile
+++ b/Makefile
@@ -44,6 +44,11 @@ CXXFLAGS += -Wno-pessimizing-move
CXXFLAGS += -Wno-misleading-indentation
#CXXFLAGS += -Wno-unused-private-field
+
+# - Flags to pass to all mrustc invocations
+RUST_FLAGS := --cfg debug_assertions
+RUST_FLAGS += -g
+
SHELL = bash
ifeq ($(DBGTPL),)
@@ -79,6 +84,7 @@ OBJ += expand/include.o
OBJ += expand/env.o
OBJ += expand/test.o
OBJ += expand/rustc_diagnostics.o
+OBJ += expand/test_harness.o
OBJ += macro_rules/mod.o macro_rules/eval.o macro_rules/parse.o
OBJ += resolve/use.o resolve/index.o resolve/absolute.o
OBJ += hir/from_ast.o hir/from_ast_expr.o
@@ -92,16 +98,19 @@ OBJ += hir_typeck/outer.o hir_typeck/common.o hir_typeck/helpers.o hir_typeck/st
OBJ += hir_typeck/expr_visit.o
OBJ += hir_typeck/expr_cs.o
OBJ += hir_typeck/expr_check.o
-OBJ += hir_expand/annotate_value_usage.o hir_expand/closures.o hir_expand/ufcs_everything.o
+OBJ += hir_expand/annotate_value_usage.o hir_expand/closures.o
+OBJ += hir_expand/ufcs_everything.o
OBJ += hir_expand/reborrow.o hir_expand/erased_types.o hir_expand/vtable.o
OBJ += hir_expand/const_eval_full.o
OBJ += mir/mir.o mir/mir_ptr.o
OBJ += mir/dump.o mir/helpers.o mir/visit_crate_mir.o
OBJ += mir/from_hir.o mir/from_hir_match.o mir/mir_builder.o
OBJ += mir/check.o mir/cleanup.o mir/optimise.o
+OBJ += mir/check_full.o
OBJ += hir/serialise.o hir/deserialise.o hir/serialise_lowlevel.o
OBJ += trans/trans_list.o trans/mangling.o
OBJ += trans/enumerate.o trans/monomorphise.o trans/codegen.o trans/codegen_c.o
+OBJ += trans/target.o
PCHS := ast/ast.hpp
@@ -127,17 +136,30 @@ output/lib%.hir: $(RUSTCSRC)src/lib%/lib.rs $(RUSTCSRC) $(BIN)
@echo "--- [MRUSTC] $@"
@mkdir -p output/
@rm -f $@
- $(DBG) $(ENV_$@) $(BIN) $< -o $@ $(ARGS_$@) $(PIPECMD)
+ $(DBG) $(ENV_$@) $(BIN) $< -o $@ $(RUST_FLAGS) $(ARGS_$@) $(PIPECMD)
# # HACK: Work around gdb returning success even if the program crashed
@test -e $@
output/lib%.hir: $(RUSTCSRC)src/lib%/src/lib.rs $(RUSTCSRC) $(BIN)
@echo "--- [MRUSTC] $@"
@mkdir -p output/
@rm -f $@
- $(DBG) $(BIN) $< -o $@ $(PIPECMD)
+ $(DBG) $(ENV_$@) $(BIN) $< -o $@ $(RUST_FLAGS) $(ARGS_$@) $(PIPECMD)
+# # HACK: Work around gdb returning success even if the program crashed
+ @test -e $@
+output/lib%-test: $(RUSTCSRC)src/lib%/lib.rs $(RUSTCSRC) $(BIN) output/libtest.hir
+ @echo "--- [MRUSTC] --test -o $@"
+ @mkdir -p output/
+ @rm -f $@
+ $(DBG) $(ENV_$@) $(BIN) --test $< -o $@ -L output/libs $(RUST_FLAGS) $(ARGS_$@) $(PIPECMD)
+# # HACK: Work around gdb returning success even if the program crashed
+ @test -e $@
+output/lib%-test: $(RUSTCSRC)src/lib%/src/lib.rs $(RUSTCSRC) $(BIN) output/libtest.hir
+ @echo "--- [MRUSTC] $@"
+ @mkdir -p output/
+ @rm -f $@
+ $(DBG) $(ENV_$@) $(BIN) --test $< -o $@ -L output/libs $(RUST_FLAGS) $(ARGS_$@) $(PIPECMD)
# # HACK: Work around gdb returning success even if the program crashed
@test -e $@
-
fcn_extcrate = $(patsubst %,output/lib%.hir,$(1))
fn_getdeps = \
@@ -163,13 +185,13 @@ output/librustc_llvm.hir: $(LLVM_LINKAGE_FILE)
RUSTC_LLVM_LINKAGE: $(LLVM_LINKAGE_FILE)
output/librustc_llvm_build: rustc-nightly/src/librustc_llvm/build.rs $(call fcn_extcrate, std gcc build_helper alloc_system panic_abort)
@echo "--- [MRUSTC] $@"
- $(BIN) $< -o $@ -L output/libs $(PIPECMD)
+ $(BIN) $< -o $@ -L output/libs $(RUST_FLAGS) $(PIPECMD)
output/libgcc.hir: crates.io/gcc-0.3.28/src/lib.rs $(BIN) output/libstd.hir
@echo "--- [MRUSTC] $@"
- $(BIN) $< -o $@ --crate-type rlib --crate-name gcc $(PIPECMD)
+ $(BIN) $< -o $@ --crate-type rlib --crate-name gcc $(RUST_FLAGS) $(PIPECMD)
output/libbuild_helper.hir: rustc-nightly/src/build_helper/lib.rs $(BIN) output/libstd.hir
@echo "--- [MRUSTC] $@"
- $(BIN) $< -o $@ --crate-type rlib --crate-name build_helper $(PIPECMD)
+ $(BIN) $< -o $@ --crate-type rlib --crate-name build_helper $(RUST_FLAGS) $(PIPECMD)
crates.io/%/src/lib.rs: crates.io/%.tar.gz
tar -xf $< -C crates.io/
@@ -199,7 +221,7 @@ output/cargo_libflate/libminiz.a: output/libflate_build
output/libflate_build: rustc-nightly/src/libflate/build.rs $(call fcn_extcrate, std gcc alloc_system panic_abort)
@echo "--- [MRUSTC] $@"
- $(BIN) $< -o $@ -L output/libs $(PIPECMD)
+ $(BIN) $< -o $@ -L output/libs $(RUST_FLAGS) $(PIPECMD)
ARGS_output/librustc_llvm.hir := --cfg llvm_component=x86 --cfg cargobuild
ENV_output/librustc_llvm.hir := CFG_LLVM_LINKAGE_FILE=$(LLVM_LINKAGE_FILE)
@@ -268,14 +290,14 @@ output/rustc: $(RUSTCSRC)src/rustc/rustc.rs output/librustc_driver.hir output/ru
@echo "--- [MRUSTC] $@"
@mkdir -p output/
@rm -f $@
- $V$(DBG) $(BIN) $< -o $@ -L output/libs $$(cat output/rustc_link_opts.txt output/rustc_link_opts-libflate.txt) -l stdc++ $(PIPECMD)
+ $V$(DBG) $(BIN) $< -o $@ -L output/libs $$(cat output/rustc_link_opts.txt output/rustc_link_opts-libflate.txt) -l stdc++ $(RUST_FLAGS) $(PIPECMD)
# # HACK: Work around gdb returning success even if the program crashed
@test -e $@
.PHONY: RUSTCSRC
RUSTCSRC: $(RUSTCSRC)
-$(RUSTCSRC): rust-nightly-date
+$(RUSTCSRC): rust-nightly-date rust_src.patch
@export DL_RUST_DATE=$$(cat rust-nightly-date); \
export DISK_RUST_DATE=$$([ -f $(RUSTC_SRC_DL) ] && cat $(RUSTC_SRC_DL)); \
if [ "$$DL_RUST_DATE" != "$$DISK_RUST_DATE" ]; then \
@@ -284,6 +306,7 @@ $(RUSTCSRC): rust-nightly-date
rm -rf rustc-nightly; \
curl -sS https://static.rust-lang.org/dist/$${DL_RUST_DATE}/rustc-nightly-src.tar.gz -o rustc-nightly-src.tar.gz; \
tar -xf rustc-nightly-src.tar.gz --transform 's~^rustc-nightly-src~rustc-nightly~'; \
+ patch -p0 < rust_src.patch; \
echo "$$DL_RUST_DATE" > $(RUSTC_SRC_DL); \
fi
@@ -311,55 +334,503 @@ $(RUSTCSRC)build/Makefile: $(RUSTCSRC)src/llvm/CMakeLists.txt
$Vcd $(RUSTCSRC)build && cmake $(addprefix -D , $(LLVM_CMAKE_OPTS)) ../src/llvm
+# MRUSTC-specific tests
+.PHONY: local_tests
+local_tests: $(patsubst samples/test/%.rs,output/local_test/%_out.txt,$(wildcard samples/test/*.rs))
+
+output/local_test/%_out.txt: output/local_test/%
+ ./$< > $@
+output/local_test/%: samples/test/%.rs $(BIN)
+ mkdir -p $(dir $@)
+ $(BIN) -L output/libs -g $< -o $@ --test $(PIPECMD)
+
#
# RUSTC TESTS
#
-.PHONY: rust_tests
+.PHONY: rust_tests local_tests
RUST_TESTS_DIR := $(RUSTCSRC)src/test/
-rust_tests: rust_tests-run-pass rust_tests-run-fail
+rust_tests: rust_tests-run-pass
+# rust_tests-run-fail
# rust_tests-compile-fail
-# - Require external symbols that aren't generated.
-DISABLED_TESTS = run-pass/abi-sysv64-arg-passing run-pass/abi-sysv64-register-usage run-pass/anon-extern-mod run-pass/anon-extern-mod-cross-crate-2
+DISABLED_TESTS :=
# - NOT A TEST
DISABLED_TESTS += run-pass/backtrace-debuginfo-aux
-# - asm! is hard to trnaslate
-DISABLED_TESTS += run-pass/asm-in-out-operand run-pass/asm-indirect-memory run-pass/asm-out-assign
+DISABLED_TESTS += run-pass/mod_file_aux
+# - asm! is hard to translate
+DISABLED_TESTS += run-pass/abi-sysv64-register-usage
+DISABLED_TESTS += run-pass/asm-in-out-operand
+DISABLED_TESTS += run-pass/asm-indirect-memory
+DISABLED_TESTS += run-pass/asm-out-assign
+DISABLED_TESTS += run-pass/i128 # Unknown leader 'r'
+DISABLED_TESTS += run-pass/issue-14936
+DISABLED_TESTS += run-pass/issue-32947
+DISABLED_TESTS += run-pass/num-wrapping
+DISABLED_TESTS += run-pass/out-of-stack
# - Requires jemalloc
-DISABLED_TESTS += run-pass/allocator-default run-pass/allocator-override
+DISABLED_TESTS += run-pass/allocator-default
+DISABLED_TESTS += run-pass/allocator-override
# - Bug in inferrence order.
DISABLED_TESTS += run-pass/associated-types-conditional-dispatch
# - Lazy.
-DISABLED_TESTS += run-pass/associated-types-projection-in-where-clause run-pass/autoderef-privacy
+DISABLED_TESTS += run-pass/associated-types-projection-in-where-clause # Not normalizing bounds
+DISABLED_TESTS += run-pass/cast # Disallows cast from char to i32
+DISABLED_TESTS += run-pass/empty-struct-braces # Empty struct support
+DISABLED_TESTS += run-pass/explicit-self-generic # Tries to use HashMap as an iterator
+DISABLED_TESTS += run-pass/extern-compare-with-return-type # Specialisation with function pointers
+DISABLED_TESTS += run-pass/issue-14399 # Inference ran through a coercion point.
+DISABLED_TESTS += run-pass/issue-26709 # ^ (integer literal)
+DISABLED_TESTS += run-pass/issue-20797 # Failed to find impl with associated type, possible incorrect coerce?
+DISABLED_TESTS += run-pass/issue-21245 # IntoIterator on core::slice::Iterator ?
+DISABLED_TESTS += run-pass/issue-21486 # Type mismatch
+DISABLED_TESTS += run-pass/issue-21410 # Infinite recursion
+DISABLED_TESTS += run-pass/issue-25439 # ^
+DISABLED_TESTS += run-pass/issue-22629 # Auto trait + UFCS todo
+DISABLED_TESTS += run-pass/send-is-not-static-par-for # ^
+DISABLED_TESTS += run-pass/issue-22828 # ^
+DISABLED_TESTS += run-pass/issue-23699 # fn() inference
+DISABLED_TESTS += run-pass/issue-30371 # destructuring pattern on !
+DISABLED_TESTS += run-pass/issue-33687 # Unit struct implementing FnOnce call
+DISABLED_TESTS += run-pass/issue-38033 # Not equating associated type of type param.
+DISABLED_TESTS += run-pass/issue-7784 # PartialEq impl
+DISABLED_TESTS += run-pass/traits-issue-26339 # ^
+DISABLED_TESTS += run-pass/builtin-superkinds-self-type # ^
+DISABLED_TESTS += run-pass/intrinsic-move-val # ^
+DISABLED_TESTS += run-pass/issue-9951 # Trait impled for i32
+DISABLED_TESTS += run-pass/trait-default-method-xc # ^
+DISABLED_TESTS += run-pass/trait-impl # ^
+DISABLED_TESTS += run-pass/issue-11205 # ^
+DISABLED_TESTS += run-pass/mir_coercions # Coercion to unsafe fn
+DISABLED_TESTS += run-pass/typeck-fn-to-unsafe-fn-ptr # ^
+DISABLED_TESTS += run-pass/unsafe-coercion # ^
+DISABLED_TESTS += run-pass/mir_misc_casts # Cast fn to *const isize
+DISABLED_TESTS += run-pass/never-result # ! not correctly unifiying
+DISABLED_TESTS += run-pass/reachable-unnameable-items # assert Struct::is_Named()
+DISABLED_TESTS += run-pass/self-impl # Unable to infer
+DISABLED_TESTS += run-pass/trait-copy-guessing # ^
+DISABLED_TESTS += run-pass/issue-20575 # ^
+DISABLED_TESTS += run-pass/sync-send-iterators-in-libcore # Send for Range<_>
+DISABLED_TESTS += run-pass/const-bound # Sync for Box<_>
+DISABLED_TESTS += run-pass/traits-repeated-supertrait # Type mismatch, i64 and u64
+DISABLED_TESTS += run-pass/trans-object-shim # fn cast to other fn as type annotation
+DISABLED_TESTS += run-pass/variadic-ffi # variadics not supported
+DISABLED_TESTS += run-pass/weird-exprs # Line 17, let _ = return; result type
+DISABLED_TESTS += run-pass/where-for-self # Failed deref coercion?
+DISABLED_TESTS += run-pass/union/union-backcomp # ? discarded value?
+# - Typecheck - `_` type with no ivar index assigned
+DISABLED_TESTS += run-pass/coerce-expect-unsized
+DISABLED_TESTS += run-pass/coerce-unify-return
+DISABLED_TESTS += run-pass/issue-23485
+DISABLED_TESTS += run-pass/issue-26805
+DISABLED_TESTS += run-pass/match-vec-alternatives
+DISABLED_TESTS += run-pass/pure-sum
+DISABLED_TESTS += run-pass/struct-aliases
+# - Lazy (Typecheck - Leftover rules)
+DISABLED_TESTS += run-pass/regions-infer-borrow-scope-addr-of # Didn't unify literal ivar
+DISABLED_TESTS += run-pass/swap-2 # ^
+DISABLED_TESTS += run-pass/slice_binary_search # Didn't detect infer possibility (&str, &String)
+# - Lazy (Typecheck - Array unsize)
+DISABLED_TESTS += run-pass/byte-literals # Over-eager inference
+DISABLED_TESTS += run-pass/cast-rfc0401-vtable-kinds # Spare rules
+DISABLED_TESTS += run-pass/cast-rfc0401 # Skipped coerce unsized
+DISABLED_TESTS += run-pass/dst-struct-sole # Spare rules
+DISABLED_TESTS += run-pass/dst-struct # Spare rules
+DISABLED_TESTS += run-pass/issue-23261 # Spare rules
+DISABLED_TESTS += run-pass/fat-ptr-cast # Skipped coerce unsized
+DISABLED_TESTS += run-pass/issue-21562 # ERROR - Borrow->Pointer and Unsize in one
+DISABLED_TESTS += run-pass/raw-fat-ptr # ^
+DISABLED_TESTS += run-pass/issue-9382 # Missing coercion point (struct field)
+DISABLED_TESTS += run-pass/overloaded-autoderef-indexing # Missing coercion point (struct field)
+DISABLED_TESTS += run-pass/match-byte-array-patterns # Byte string match
+DISABLED_TESTS += run-pass/mir_raw_fat_ptr # Byte string match
+DISABLED_TESTS += run-pass/never_coercions # Missed coerce
+## - Lazy (Typecheck + Trait unsize)
+#DISABLED_TESTS += run-pass/issue-27105
+#DISABLED_TESTS += run-pass/dst-coerce-rc
+DISABLED_TESTS += run-pass/dst-coercions # Skipped CoerceUnsize
+DISABLED_TESTS += run-pass/dst-raw # Skipped CoerceUnsize
+DISABLED_TESTS += run-pass/issue-11677 # Skipped
+#DISABLED_TESTS += run-pass/dst-trait
+# - Lazy (MIR)
+DISABLED_TESTS += run-pass/if-ret # If condition wasn't a bool
+DISABLED_TESTS += run-pass/intrinsics-integer # todo - bswap<i8>
+DISABLED_TESTS += run-pass/issue-11940 # todo: Match literal Borrow
+DISABLED_TESTS += run-pass/mir_build_match_comparisons # - ^
+DISABLED_TESTS += run-pass/issue-13620 # - Todo in cleanup
+DISABLED_TESTS += run-pass/vec-matching-fold # todo: Match SplitSlice with tailing (rule gen)
+DISABLED_TESTS += run-pass/issue-17877 # - SplitSlice + array
+DISABLED_TESTS += run-pass/vec-matching-fixed # ^
+DISABLED_TESTS += run-pass/vec-tail-matching # SplitSlice destructure array
+DISABLED_TESTS += run-pass/zero_sized_subslice_match # ^
+DISABLED_TESTS += run-pass/issue-18352 # - Match+const
+DISABLED_TESTS += run-pass/issue-28839 # - Move &mut ?
+DISABLED_TESTS += run-pass/union/union-inherent-method # ^ ?
+DISABLED_TESTS += run-pass/issue-21306 # ^
+DISABLED_TESTS += run-pass/issue-28950 # - Stack overflow in vec!
+DISABLED_TESTS += run-pass/mir_heavy_promoted # Stack overflow in array constant
+DISABLED_TESTS += run-pass/issue-29227 # - Excessive time in MIR lowering
+DISABLED_TESTS += run-pass/issue-15763 # No value available
+DISABLED_TESTS += run-pass/issue-18110 # ^
+DISABLED_TESTS += run-pass/issue-30018-nopanic # ^
+DISABLED_TESTS += run-pass/match-bot-2 # ^
+DISABLED_TESTS += run-pass/unreachable-code # ^
+DISABLED_TESTS += run-pass/issue-36936 # - Cast removed
+DISABLED_TESTS += run-pass/struct-order-of-eval-1 # Struct init order (fails validation)
+DISABLED_TESTS += run-pass/struct-order-of-eval-3 # ^
+DISABLED_TESTS += run-pass/const-enum-vec-index # This is valid code?
+# - Lazy (trans)
+DISABLED_TESTS += run-pass/issue-21058 # Empty trait object vtable
+DISABLED_TESTS += run-pass/issue-25515 # ^
+DISABLED_TESTS += run-pass/issue-35815 # ^
+DISABLED_TESTS += run-pass/issue-29663 # Missing volatile_(load|store) intrinsic
+DISABLED_TESTS += run-pass/intrinsic-alignment # Missing pref_align_of intrinsic
+DISABLED_TESTS += run-pass/volatile-fat-ptr # ^
+DISABLED_TESTS += run-pass/newtype # Can't handle mutually recursive definitions
+DISABLED_TESTS += run-pass/transmute-specialization # Opaque type hit?
+DISABLED_TESTS += run-pass/unit-fallback # ! didn't default to ()
+DISABLED_TESTS += run-pass/issue-33387 # Missing vtable for array
+# - HIR resolve
+DISABLED_TESTS += run-pass/union/union-generic # Can't find associated type on type param
+# - Lazy (misc)
+DISABLED_TESTS += run-pass/issue-13494
+DISABLED_TESTS += run-pass/issue-6919 # Literal function pointer
+DISABLED_TESTS += run-pass/item-attributes # Attributed function after last statement leads to last statement yielded
+DISABLED_TESTS += run-pass/new-box-syntax # todo - placement syntax
+DISABLED_TESTS += run-pass/placement-in-syntax # ^
+DISABLED_TESTS += run-pass/pat-tuple-1 # assertion in "Annotate Value Usage"
+DISABLED_TESTS += run-pass/pat-tuple-2 # ^
+DISABLED_TESTS += run-pass/pat-tuple-3 # ^
+DISABLED_TESTS += run-pass/pat-tuple-4 # ^
+DISABLED_TESTS += run-pass/paths-in-macro-invocations # MISSING: qualified macro paths
+DISABLED_TESTS += run-pass/struct-path-associated-type # non-absolute path for HIR::GenericPath
+DISABLED_TESTS += run-pass/struct-path-self # ^
+DISABLED_TESTS += run-pass/ufcs-polymorphic-paths # ^
+# - Resolve
+DISABLED_TESTS += run-pass/issue-22546 # None::<u8> handling in patterns
+DISABLED_TESTS += run-pass/issue-29540 # Infinite recursion
+DISABLED_TESTS += run-pass/issue-38002 # Enum::StructVariant
+DISABLED_TESTS += run-pass/match-arm-statics # ^
+DISABLED_TESTS += run-pass/mir_ascription_coercion # Missed item
+DISABLED_TESTS += run-pass/type-ascription # Relative path in lowering
+DISABLED_TESTS += run-pass/issue-15221 # Macros in patterns
+# - Overly-restrictive consteval
+DISABLED_TESTS += run-pass/const-cast # Cast from fn() to pointer
+DISABLED_TESTS += run-pass/const-autoderef # Expected [u8]
+DISABLED_TESTS += run-pass/check-static-mut-slices
+DISABLED_TESTS += run-pass/check-static-slice
+DISABLED_TESTS += run-pass/const-binops
+DISABLED_TESTS += run-pass/const-contents
+DISABLED_TESTS += run-pass/const-deref
+DISABLED_TESTS += run-pass/const-enum-cast
+DISABLED_TESTS += run-pass/const-err
+DISABLED_TESTS += run-pass/const-fields-and-indexing
+DISABLED_TESTS += run-pass/const-fn-method
+DISABLED_TESTS += run-pass/const-fn
+DISABLED_TESTS += run-pass/const-str-ptr
+DISABLED_TESTS += run-pass/const-vec-of-fns
+DISABLED_TESTS += run-pass/diverging-fn-tail-35849
+DISABLED_TESTS += run-pass/enum-vec-initializer
+DISABLED_TESTS += run-pass/huge-largest-array
+DISABLED_TESTS += run-pass/issue-17233
+DISABLED_TESTS += run-pass/issue-19244 # Missing type info
+DISABLED_TESTS += run-pass/issue-22894 # TODO: Deref
+DISABLED_TESTS += run-pass/issue-25180 # Closure in const
+DISABLED_TESTS += run-pass/issue-27268 # ^
+DISABLED_TESTS += run-pass/issue-28189 # ^
+DISABLED_TESTS += run-pass/issue-25757 # UFCS function pointer
+DISABLED_TESTS += run-pass/mir_refs_correct
+DISABLED_TESTS += run-pass/vec-fixed-length # Overflow in costeval
+DISABLED_TESTS += run-pass/union/union-const-trans # Union literal
+# - Type defaults not supported
+DISABLED_TESTS += run-pass/default-associated-types
+DISABLED_TESTS += run-pass/default_ty_param_default_dependent_associated_type
+DISABLED_TESTS += run-pass/default_ty_param_dependent_defaults
+DISABLED_TESTS += run-pass/default_ty_param_method_call_test
+DISABLED_TESTS += run-pass/default_ty_param_struct_and_type_alias
+DISABLED_TESTS += run-pass/default_ty_param_struct
+DISABLED_TESTS += run-pass/default_ty_param_trait_impl
+DISABLED_TESTS += run-pass/default_ty_param_trait_impl_simple
+DISABLED_TESTS += run-pass/default_ty_param_type_alias
+DISABLED_TESTS += run-pass/generic-default-type-params-cross-crate
+DISABLED_TESTS += run-pass/generic-default-type-params
+# - ERROR: Function pointers in consants/statics don't trigger calls
+DISABLED_TESTS += run-pass/issue-17718
+DISABLED_TESTS += run-pass/rfc1623
+DISABLED_TESTS += run-pass/static-function-pointer-xc
+DISABLED_TESTS += run-pass/static-function-pointer
+DISABLED_TESTS += run-pass/const-block-cross-crate-fn
+DISABLED_TESTS += run-pass/const-block-item
+DISABLED_TESTS += run-pass/const-block
+# - Quirks
+DISABLED_TESTS += run-pass/autoderef-privacy # No privacy with autoderef
+DISABLED_TESTS += run-pass/fn-item-type-zero-sized # fn() items are not ZSTs
+DISABLED_TESTS += run-pass/int-abs-overflow # No overflow checks
+DISABLED_TESTS += run-pass/issue-18859 # module_path output is different
+DISABLED_TESTS += run-pass/issue-8709 # stringify! output
+DISABLED_TESTS += run-pass/tydesc-name # ^
+DISABLED_TESTS += run-pass/concat # ^
+DISABLED_TESTS += run-pass/type-id-higher-rank-2 # lifetimes don't apply in type_id
+DISABLED_TESTS += run-pass/type-id-higher-rank # ^
+# - BUG-Expand: macro_rules
+DISABLED_TESTS += run-pass/macro-of-higher-order
+DISABLED_TESTS += run-pass/macro-pat # :pat doesn't allow MACRO
+DISABLED_TESTS += run-pass/macro-reexport-no-intermediate-use # macro_reexport failed
+DISABLED_TESTS += run-pass/macro-with-attrs1 # cfg on macro_rules
+DISABLED_TESTS += run-pass/shift-near-oflo # Scoping rules
+DISABLED_TESTS += run-pass/type-macros-simple # ^
+DISABLED_TESTS += run-pass/stmt_expr_attr_macro_parse # Ordering with :expr and #[]
+DISABLED_TESTS += run-pass/sync-send-iterators-in-libcollections # .. should match :expr
+DISABLED_TESTS += run-pass/type-macros-hlist # Mismatched arms
+# - BUG-Expand: format_args!
+DISABLED_TESTS += run-pass/ifmt # Unknown formatting type specifier '*'
+# - BUG-Expand: line/column macros don't work properly
+DISABLED_TESTS += run-pass/issue-26322
+# - Expand
+DISABLED_TESTS += run-pass/issue-11085 # No support for cfg() on enum variants
+DISABLED_TESTS += run-pass/lexer-crlf-line-endings-string-literal-doc-comment # Missing include_str!
+DISABLED_TESTS += run-pass/syntax-extension-source-utils # ^
+DISABLED_TESTS += run-pass/link-cfg-works # cfg in #[link]
+DISABLED_TESTS += run-pass/linkage1 # #[linkage]
+DISABLED_TESTS += run-pass/log_syntax-trace_macros-macro-locations # no trace_macros!
+DISABLED_TESTS += run-pass/macro-use-all-and-none # missing macro_use feature
+DISABLED_TESTS += run-pass/macro-use-both # ^
+DISABLED_TESTS += run-pass/macro-use-one # ^
+DISABLED_TESTS += run-pass/two-macro-use # ^
+DISABLED_TESTS += run-pass/simd-intrinsic-generic-cast # Missing concat_idents!
+DISABLED_TESTS += run-pass/simd-intrinsic-generic-comparison # ^
+DISABLED_TESTS += run-pass/smallest-hello-world # missing lang item
+DISABLED_TESTS += run-pass/trait-item-inside-macro # macro invocations in traits
+DISABLED_TESTS += run-pass/try-operator-custom # `?` carrier
+DISABLED_TESTS += run-pass/wrapping-int-api # cfg on match arms
+DISABLED_TESTS += run-pass/union/union-c-interop # union derive
+DISABLED_TESTS += run-pass/union/union-derive # ^
+DISABLED_TESTS += run-pass/union/union-overwrite # ? MetaItem::as_String()
+DISABLED_TESTS += run-pass/union/union-packed # ^
+DISABLED_TESTS += run-pass/union/union-pat-refutability # ^
+# - Parse
+DISABLED_TESTS += run-pass/issue-37733 # for<'a,>
+DISABLED_TESTS += run-pass/loop-break-value # `break value`
+DISABLED_TESTS += run-pass/macro-attribute-expansion # No handling of $expr in attributes
+DISABLED_TESTS += run-pass/macro-doc-escapes # Doc comments aren't attributes
+DISABLED_TESTS += run-pass/macro-doc-raw-str-hashes # ^
+DISABLED_TESTS += run-pass/macro-interpolation # $block not allowed in place of function body
+DISABLED_TESTS += run-pass/macro-stmt # ^
+DISABLED_TESTS += run-pass/macro-tt-followed-by-seq # Mismatched arms?
+DISABLED_TESTS += run-pass/struct-field-shorthand # Struct field shorthand
+DISABLED_TESTS += run-pass/vec-matching # [a, [b,..].., c]
+# HIR Lowering
+DISABLED_TESTS += run-pass/union/union-basic # Union struct pattern
+# - BUG-Parse: `use *`
+DISABLED_TESTS += run-pass/import-glob-crate
+DISABLED_TESTS += run-pass/import-prefix-macro
+# - BUG-CODEGEN: Missing symbol
+DISABLED_TESTS += run-pass/const-enum-ptr
+DISABLED_TESTS += run-pass/const-enum-vec-ptr
+DISABLED_TESTS += run-pass/const-vecs-and-slices
+DISABLED_TESTS += run-pass/issue-5688
+DISABLED_TESTS += run-pass/issue-5917
+DISABLED_TESTS += run-pass/issue-7012
+DISABLED_TESTS += run-pass/issue-29147 # Missing type
+DISABLED_TESTS += run-pass/issue-30081 # ^
+DISABLED_TESTS += run-pass/issue-3447 # ^
+DISABLED_TESTS += run-pass/issue-34796 # Missing vtable type (in dep)
+DISABLED_TESTS += run-pass/simd-generics # "platform-intrinsics"
+DISABLED_TESTS += run-pass/simd-intrinsic-generic-arithmetic # ^
+DISABLED_TESTS += run-pass/simd-intrinsic-generic-elements # ^
+DISABLED_TESTS += run-pass/thread-local-extern-static # Extern static not generated?
+# - BUG: Codegen
+DISABLED_TESTS += run-pass/unsized3 # Pointer instead of fat pointer
+DISABLED_TESTS += run-pass/utf8_idents # C backend doesn't support utf8 idents
+# - BUG: Hygiene
+DISABLED_TESTS += run-pass/hygiene
+DISABLED_TESTS += run-pass/hygienic-labels-in-let
+DISABLED_TESTS += run-pass/hygienic-labels
+DISABLED_TESTS += run-pass/macro-nested_stmt_macros # hygiene fires when it shouldn't
+# - Test framework required
+DISABLED_TESTS += run-pass/core-run-destroy
+DISABLED_TESTS += run-pass/exec-env
+DISABLED_TESTS += run-pass/issue-16597-empty
+DISABLED_TESTS += run-pass/issue-16597 # NOTE: Crashes in resolve
+DISABLED_TESTS += run-pass/issue-20823
+DISABLED_TESTS += run-pass/issue-34932
+DISABLED_TESTS += run-pass/issue-36768
+DISABLED_TESTS += run-pass/reexport-test-harness-main
+DISABLED_TESTS += run-pass/test-fn-signature-verification-for-explicit-return-type
+DISABLED_TESTS += run-pass/test-main-not-dead-attr
+DISABLED_TESTS += run-pass/test-main-not-dead
+DISABLED_TESTS += run-pass/test-runner-hides-buried-main
+DISABLED_TESTS += run-pass/test-runner-hides-main
+DISABLED_TESTS += run-pass/test-runner-hides-start
+DISABLED_TESTS += run-pass/test-should-fail-good-message
+DISABLED_TESTS += run-pass/test-should-panic-attr
+# - Makefile test framework quirks
+DISABLED_TESTS += run-pass/issue-18913
+DISABLED_TESTS += run-pass/issue-2380-b
+DISABLED_TESTS += run-pass/issue-29485
+DISABLED_TESTS += run-pass/svh-add-comment
+DISABLED_TESTS += run-pass/svh-add-doc
+DISABLED_TESTS += run-pass/svh-add-macro
+DISABLED_TESTS += run-pass/svh-add-nothing
+DISABLED_TESTS += run-pass/svh-add-redundant-cfg
+DISABLED_TESTS += run-pass/svh-add-whitespace
+# - Target Features
+DISABLED_TESTS += run-pass/crt-static-on-works
+DISABLED_TESTS += run-pass/sse2
+# - Infinite loops
+DISABLED_TESTS += run-pass/issue-27890 # - Stack exhausted : Resolve
+DISABLED_TESTS += run-pass/project-cache-issue-31849
+# - Impl selection
+DISABLED_TESTS += run-pass/issue-23208 # Couldn't find an impl for <T/*M:0*/ as ::TheSuperTrait<u32,>>::get
+DISABLED_TESTS += run-pass/xcrate-associated-type-defaults # Failed to find an impl
+# --- Runtime Errors ---
# - Line information that isn't avaliable due to codegen
-DISABLED_TESTS += run-pass/backtrace-debuginfo run-pass/backtrace
+DISABLED_TESTS += run-pass/backtrace-debuginfo
+DISABLED_TESTS += run-pass/backtrace
# - No unwind catching support
-DISABLED_TESTS += run-pass/binary-heap-panic-safe run-pass/box-of-array-of-drop-1 run-pass/box-of-array-of-drop-2
-# - Infinite loops
-DISABLED_TESTS += run-pass/issue-16671
+DISABLED_TESTS += run-pass/binary-heap-panic-safe
+DISABLED_TESTS += run-pass/box-of-array-of-drop-1
+DISABLED_TESTS += run-pass/box-of-array-of-drop-2
+DISABLED_TESTS += run-pass/cleanup-rvalue-temp-during-incomplete-alloc
+DISABLED_TESTS += run-pass/drop-trait-enum
+DISABLED_TESTS += run-pass/intrinsic-move-val-cleanups
+DISABLED_TESTS += run-pass/issue-14875
+DISABLED_TESTS += run-pass/issue-25089
+DISABLED_TESTS += run-pass/issue-26655
+DISABLED_TESTS += run-pass/issue-30018-panic
+DISABLED_TESTS += run-pass/issue-8460
+DISABLED_TESTS += run-pass/iter-step-overflow-debug
+DISABLED_TESTS += run-pass/iter-sum-overflow-debug
+DISABLED_TESTS += run-pass/multi-panic
+DISABLED_TESTS += run-pass/nested-vec-3
+DISABLED_TESTS += run-pass/no-landing-pads
+DISABLED_TESTS += run-pass/panic-handler-chain
+DISABLED_TESTS += run-pass/panic-handler-flail-wildly
+DISABLED_TESTS += run-pass/panic-handler-set-twice
+DISABLED_TESTS += run-pass/panic-in-dtor-drops-fields
+DISABLED_TESTS += run-pass/panic-recover-propagate
+DISABLED_TESTS += run-pass/sepcomp-unwind
+DISABLED_TESTS += run-pass/slice-panic-1
+DISABLED_TESTS += run-pass/slice-panic-2
+DISABLED_TESTS += run-pass/task-stderr
+DISABLED_TESTS += run-pass/terminate-in-initializer
+DISABLED_TESTS += run-pass/unit-like-struct-drop-run
+DISABLED_TESTS += run-pass/unwind-resource
+DISABLED_TESTS += run-pass/unwind-unique
+DISABLED_TESTS += run-pass/vector-sort-panic-safe
+DISABLED_TESTS += run-pass/dynamic-drop
+# - Misc
+DISABLED_TESTS += run-pass/issue-16671 # Blocks forever
+DISABLED_TESTS += run-pass/issue-13027 # Infinite loop (match?)
+# - BUG: Incorrect drop order of ?
+DISABLED_TESTS += run-pass/issue-23338-ensure-param-drop-order
+# - BUG: Incorrect consteval
+DISABLED_TESTS += run-pass/issue-23968-const-not-overflow # !0 / 2 incorrect value
+# - BUG: Incorrect ordering of read in binops
+DISABLED_TESTS += run-pass/issue-27054-primitive-binary-ops
+# - BUG: Enum variants not getting correct integer values (negatives)
+DISABLED_TESTS += run-pass/discriminant_value
+DISABLED_TESTS += run-pass/enum-discr
+DISABLED_TESTS += run-pass/enum-disr-val-pretty
+DISABLED_TESTS += run-pass/issue-15523-big
+DISABLED_TESTS += run-pass/issue-9837
+DISABLED_TESTS += run-pass/signed-shift-const-eval
+DISABLED_TESTS += run-pass/tag-variant-disr-val
+# - BUG: repr(size) not working
+DISABLED_TESTS += run-pass/enum-univariant-repr
+DISABLED_TESTS += run-pass/issue-15523
+# - ConstEval: Handling of enum variant casts
+DISABLED_TESTS += run-pass/issue-23304-1
+DISABLED_TESTS += run-pass/issue-23304-2
+#DISABLED_TESTS += run-pass/issue-2428
+#DISABLED_TESTS += run-pass/resolve-issue-2428
+#DISABLED_TESTS += run-pass/small-enum-range-edge
+# - BUG: Null pointer opt not fully correct
+DISABLED_TESTS += run-pass/enum-null-pointer-opt
+DISABLED_TESTS += run-pass/nonzero-enum
+DISABLED_TESTS += run-pass/nullable-pointer-opt-closures
+DISABLED_TESTS += run-pass/nullable-pointer-size
+# - BUG: Incorrect enum sizing
+DISABLED_TESTS += run-pass/enum-discrim-autosizing
+DISABLED_TESTS += run-pass/enum-discrim-manual-sizing
+DISABLED_TESTS += run-pass/enum-discrim-width-stuff
+DISABLED_TESTS += run-pass/multiple-reprs # no repr handling
+DISABLED_TESTS += run-pass/small-enums-with-fields
+DISABLED_TESTS += run-pass/type-sizes
+DISABLED_TESTS += run-pass/discrim-explicit-23030
+DISABLED_TESTS += run-pass/issue-13902
+# - BUG: Bad floats
+DISABLED_TESTS += run-pass/float-nan
+DISABLED_TESTS += run-pass/float_math
+DISABLED_TESTS += run-pass/floatlits
+DISABLED_TESTS += run-pass/intrinsics-math
+# - BUG: MIR Generation
+DISABLED_TESTS += run-pass/union/union-drop-assign # No drop when assigning to union field
+DISABLED_TESTS += run-pass/issue-4734 # Destructor on unused rvalue
+DISABLED_TESTS += run-pass/issue-8860 # No drop of un-moved arguments
+DISABLED_TESTS += run-pass/issue-15080 # Infinite loop from incorrect match generation
+# - BUG: Codegen
+DISABLED_TESTS += run-pass/union/union-transmute # Incorrect union behavior, likely backend UB
+DISABLED_TESTS += run-pass/mir_overflow_off # out-of-range shift behavior
+DISABLED_TESTS += run-pass/dst-field-align # DST Fields aren't aligned correctly
+# - BUG: Codegen - No handling of repr()
+DISABLED_TESTS += run-pass/packed-struct-generic-layout
+DISABLED_TESTS += run-pass/packed-struct-generic-size
+DISABLED_TESTS += run-pass/packed-struct-layout
+DISABLED_TESTS += run-pass/packed-struct-size-xc
+DISABLED_TESTS += run-pass/packed-struct-size
+DISABLED_TESTS += run-pass/packed-struct-vec
+DISABLED_TESTS += run-pass/packed-tuple-struct-layout
+DISABLED_TESTS += run-pass/packed-tuple-struct-size
+# - BUG-Expand: format_args!
+DISABLED_TESTS += run-pass/format-ref-cell
+# - BUG-Expand: Copy,Clone calls Clone for inner values instead of copying
+DISABLED_TESTS += run-pass/deriving-copyclone
+# - BUG: Unknown
+DISABLED_TESTS += run-pass/process-spawn-with-unicode-params # Bad path for process spawn
+DISABLED_TESTS += run-pass/u128 # u128 not very good, unknown where error is
DEF_RUST_TESTS = $(sort $(patsubst $(RUST_TESTS_DIR)%.rs,output/rust/%_out.txt,$(wildcard $(RUST_TESTS_DIR)$1/*.rs)))
-rust_tests-run-pass: $(filter-out $(patsubst %,output/rust/%_out.txt,$(DISABLED_TESTS)), $(call DEF_RUST_TESTS,run-pass))
+rust_tests-run-pass: $(filter-out $(patsubst %,output/rust/%_out.txt,$(DISABLED_TESTS)), $(call DEF_RUST_TESTS,run-pass) $(call DEF_RUST_TESTS,run-pass/union))
rust_tests-run-fail: $(call DEF_RUST_TESTS,run-fail)
+
+LIB_TESTS := collections collectionstest rustc_data_structures
+RUNTIME_ARGS_output/libcollectionstest-test := --test-threads 1 --skip linked_list::test_ord_nan --skip ::slice::test_box_slice_clone_panics
+RUNTIME_ARGS_output/libstd-test := --test-threads 1 --skip :collections::hash::map::test_map::test_index_nonexistent
+RUNTIME_ARGS_output/libstd-test += --skip ::collections::hash::map::test_map::test_drops
+rust_tests-libs: $(patsubst %,output/lib%-test_out.txt, $(LIB_TESTS))
+
#rust_tests-compile-fail: $(call DEF_RUST_TESTS,compile-fail)
output/rust/test_run-pass_hello: $(RUST_TESTS_DIR)run-pass/hello.rs output/libstd.hir $(BIN) output/liballoc_system.hir output/libpanic_abort.hir
@mkdir -p $(dir $@)
@echo "--- [MRUSTC] -o $@"
- $(DBG) $(BIN) $< -L output/libs -o $@ $(PIPECMD)
- @echo "--- [$@]"
- @./$@
+ $(DBG) $(BIN) $< -L output/libs -o $@ $(RUST_FLAGS) $(PIPECMD)
+output/rust/test_run-pass_hello_out.txt: output/rust/test_run-pass_hello
+ @echo "--- [$<]"
+ @./$< | tee $@
+TEST_ARGS_run-pass/cfg-in-crate-1 := --cfg bar
TEST_ARGS_run-pass/cfgs-on-items := --cfg fooA --cfg fooB
-
-output/rust/%: $(RUST_TESTS_DIR)%.rs $(RUSTCSRC) $(BIN) output/libstd.hir output/libtest.hir
+TEST_ARGS_run-pass/cfg-macros-foo := --cfg foo
+TEST_ARGS_run-pass/cfg_attr := --cfg set1 --cfg set2
+TEST_ARGS_run-pass/issue-11085 := --cfg foo
+TEST_ARGS_run-pass/macro-meta-items := --cfg foo
+TEST_ARGS_run-pass/issue-21361 := -g
+TEST_ARGS_run-pass/syntax-extension-cfg := --cfg foo --cfg 'qux=foo'
+
+output/rust/%: $(RUST_TESTS_DIR)%.rs $(RUSTCSRC) $(BIN) output/libstd.hir output/libtest.hir output/test_deps/librust_test_helpers.a
@mkdir -p $(dir $@)
@echo "=== TEST $(patsubst output/rust/%,%,$@)"
@echo "--- [MRUSTC] -o $@"
- $V$(BIN) $< -o $@ -L output/libs --stop-after $(RUST_TESTS_FINAL_STAGE) $(TEST_ARGS_$*) > $@.txt 2>&1 || (tail -n 1 $@.txt; false)
-output/rust/%_out.txt: output/rust/%
+ $V$(BIN) $< -o $@ -L output/libs -L output/test_deps --stop-after $(RUST_TESTS_FINAL_STAGE) $(TEST_ARGS_$*) > $@.txt 2>&1 || (tail -n 1 $@.txt; false)
+output/%_out.txt: output/%
@echo "--- [$<]"
- @./$< > $@ || (tail -n 1 $@; false)
+ @./$< $(RUNTIME_ARGS_$<) > $@ || (tail -n 1 $@; mv $@ $@_fail; false)
+
+output/test_deps/librust_test_helpers.a: output/test_deps/rust_test_helpers.o
+ ar cur $@ $<
+output/test_deps/rust_test_helpers.o: $(RUSTCSRC)src/rt/rust_test_helpers.c
+ $(CC) -c $< -o $@
output/rust/run-pass/allocator-default.o: output/libstd.hir output/liballoc_jemalloc.hir
output/rust/run-pass/allocator-system.o: output/liballoc_system.hir
@@ -386,7 +857,7 @@ test_deps_run-pass.mk: Makefile $(wildcard $(RUST_TESTS_DIR)run_pass/*.rs)
#
# TEST: Rust standard library and the "hello, world" run-pass test
#
-test: $(RUSTCSRC) output/libcore.hir output/liballoc.hir output/libcollections.hir output/libstd.hir output/rust/test_run-pass_hello $(BIN)
+test: $(RUSTCSRC) output/libcore.hir output/liballoc.hir output/libcollections.hir output/libstd.hir output/rust/test_run-pass_hello_out.txt $(BIN)
#
# TEST: Attempt to compile rust_os (Tifflin) from ../rust_os
@@ -413,6 +884,9 @@ $(BIN): $(OBJ)
@mkdir -p $(dir $@)
@echo [CXX] -o $@
$V$(CXX) -o $@ $(LINKFLAGS) $(OBJ) $(LIBS)
+ objcopy --only-keep-debug $(BIN) $(BIN).debug
+ objcopy --add-gnu-debuglink=$(BIN).debug $(BIN)
+ strip $(BIN)
$(OBJDIR)%.o: src/%.cpp
@mkdir -p $(dir $@)
diff --git a/Notes/BigChanges/Variant.md b/Notes/BigChanges/Variant.md
new file mode 100644
index 00000000..55bb3bb0
--- /dev/null
+++ b/Notes/BigChanges/Variant.md
@@ -0,0 +1,70 @@
+
+(the below is from @ubsan)
+
+```
+// one can define their own variant, but I'm lazy
+#include <variant>
+#include <type_traits>
+#include <iostream>
+
+template <typename F, typename T, typename Void = void>
+struct return_type {};
+
+template <typename F, typename T>
+struct return_type<T, F, std::void_t<
+ decltype(std::declval<F>()(std::declval<T>()))
+>> {
+ using type = decltype(std::declval<F>()(std::declval<T>()));
+};
+
+template <typename F, typename T>
+using return_type_t = typename return_type<T, F>::type;
+
+// not sure what to call this
+template <typename F, typename... Ts>
+struct common_return_type {
+ using type = std::common_type_t<return_type_t<F, Ts>...>;
+};
+
+template <typename F, typename... Ts>
+using common_return_type_t = typename common_return_type<F, Ts...>::type;
+
+template <typename F, typename... Ts>
+auto match(
+ std::variant<Ts...>& variant, F&& functor
+) -> common_return_type_t<F, Ts...> {
+ // you could also use static_assert to make it SFINAE-unfriendly
+ return std::visit(functor, variant);
+}
+
+template <typename... Fs>
+struct overloaded : Fs... {
+ using Fs::operator()...;
+ overloaded(Fs&&... fs) : Fs(std::forward<Fs>(fs))... { }
+};
+
+int main() {
+ auto var = std::variant<int, std::string>(0);
+ std::cout << match(var, overloaded(
+ [](int i) { return i; },
+ [](std::string s) { return 0; }
+ ));
+}
+```
+
+
+
+ALTERNATIVE
+===========
+
+Could just update TU to have:
+```
+stmt.if_let(
+stmt.match_def(
+stmt.match(
+ [](::MIR::LValue::Data_Assign& se) {
+ },
+ [](::MIR::LValue::Data_Drop& se) {
+ }
+ );
+```
diff --git a/Notes/BorrowChecker.md b/Notes/BorrowChecker.md
new file mode 100644
index 00000000..78b46ff5
--- /dev/null
+++ b/Notes/BorrowChecker.md
@@ -0,0 +1,7 @@
+- On borrow, calculate lifetime of assignment (using existing lifetime code)
+ - Ignore reborrows?
+- Visit all statements in that lifetime and locate places where the borrow is propagated/stored
+ - Requires lifetime parameters on functions/&-ptrs to be present
+- Assignment of the source value during the lifetime of the borrow is an error
+- Dropping of the source value is an error
+- Returning the borrow is an error
diff --git a/Notes/MIR-Match.md b/Notes/MIR-Match.md
index f47d2c88..cc95522c 100644
--- a/Notes/MIR-Match.md
+++ b/Notes/MIR-Match.md
@@ -20,3 +20,22 @@ For each index in the rule (all rules must be the same length)
- Ranges sort after?
+
+Alternative Generator 2
+=======================
+
+Maintains match ordering
+
+1. Calculate branch rulesets (as existing)
+1. While rules to process:
+ 1. Group based on shared values.
+ 1. Generate dispatch arm for each group
+ 1. Recurse into group, passing local _ as fallback (or parent _ if none)
+
+```
+
+for
+
+```
+
+
diff --git a/Notes/MIR-Validation.txt b/Notes/MIR-Validation.txt
new file mode 100644
index 00000000..337c65cd
--- /dev/null
+++ b/Notes/MIR-Validation.txt
@@ -0,0 +1,9 @@
+
+Requirements:
+- Know full state (and linked states) for each path through the function.
+
+
+Idea:
+- Lifetime calculations (existing code)
+ - Mask out known invalid values at loopback.
+
diff --git a/Notes/todo.txt b/Notes/todo.txt
index 35526889..54956718 100644
--- a/Notes/todo.txt
+++ b/Notes/todo.txt
@@ -19,5 +19,7 @@ TODO:
- Optimise typecheck.
-- MIR: Add a Parameter type that is either LValue, Constant
- > For any place a LValue is currently used, but a constant is valid
+- MIR: Unify Variable/Argument/Temporary LValue types
+ - This should reduce the amount of code needed for validation, but will be a
+ BIG change.
+
diff --git a/README.md b/README.md
index b4704238..29e15342 100644
--- a/README.md
+++ b/README.md
@@ -8,36 +8,34 @@ This project is an attempt at creating a simple rust compiler in C++, with the u
The short-term goal is to compile pre-borrowchecked rust code into some intermediate form (e.g. LLVM IR, x86-64 assembly, or C code). Thankfully, (from what I have seen), the borrow checker is not needed to compile rust code (just to ensure that it's valid)
+
+Building Requirements
+=====================
+- C++14-compatible compiler (tested with gcc 5.4 and gcc 6)
+- C11 compatible C compiler (for output, see above)
+- `curl` (for downloading the rust source)
+- `cmake` (at least 3.4.3, required for building llvm in rustc)
+
Current Features
===
-- Attribute and macro expansion
-- Resolves all paths to absolute forms
-- Converts name-resolved AST into a more compact "HIR" (simplified module and expression tree)
-- Hackily evaluates constants
- - Constant evaluation is done by using duck-typing, which is then validated by the Type Check pass
- - This is how rustc did (or still does?) const eval before MIR
-- Type inference and checking
-- Closure and operator desugaring
-- MIR generation (with partial validation pass)
-- HIR/MIR (de)serialisation, allowing for `extern crate` handling
-- C-based code generation
-- Basic MIR optimisations (including inlining)
+- Full compilation chain including HIR and MIR stages (outputting to C)
+- Supports just x86-64 linux
+- MIR optimisations
+- Optionally-enablable exhaustive MIR validation (set the `MRUSTC_FULL_VALIDATE` environment variable)
Short-Term Plans
===
-- Parse and Typecheck all run-pass tests
-- Compile a running rustc
+- Fix currently-failing tests (mostly in type inference)
+- Fix all known TODOs in MIR generation (still some possible leaks)
Medium-Term Goals
===
-- Extensive MIR optimisations
- Propagate lifetime annotations so that MIR can include a borrow checker
Progress
===
-- Compiles the standard library into loadable MIR
-- Compiles the "hello, world" test into compilable and running C code
-- Compiles `rustc`
- - Generated code is likely not correct
+- Compiles static libraries into loadable HIR tree and MIR code
+- Generates working executables (most of the test suite)
+- Compiles `rustc` that can compile the standard library and "hello, world"
diff --git a/run_rustc/Makefile b/run_rustc/Makefile
new file mode 100644
index 00000000..9a7a2ed2
--- /dev/null
+++ b/run_rustc/Makefile
@@ -0,0 +1,31 @@
+RUSTC := ../output/rustc
+RUST_SRC := ../rustc-nightly/src/
+
+all: output/libstd.rlib output/hello_world
+
+RUSTFLAGS_output/liblibc.rlib := --cfg stdbuild
+
+$(RUSTC):
+ make -C ../ output/rustc -j 3
+
+output/%.rlib: $(RUST_SRC)%/lib.rs $(RUSTC)
+ mkdir -p $(dir $@)
+ time $(RUSTC) $(RUSTFLAGS_$@) -L output/ -L ../output/libs $< -o $@
+output/%.rlib: $(RUST_SRC)%/src/lib.rs $(RUSTC)
+ mkdir -p $(dir $@)
+ time $(RUSTC) $(RUSTFLAGS_$@) -L output/ -L ../output/libs $< -o $@
+
+output/hello_world: $(RUST_SRC)test/run-pass/hello.rs output/libstd.rlib $(RUSTC)
+ time $(RUSTC) $(RUSTFLAGS_$@) -L output/ -L ../output/libs $< -o $@
+
+fcn_extcrate = $(patsubst %,output/lib%.rlib,$(1))
+
+output/libarena.rlib: output/libstd.rlib
+output/liballoc.rlib: output/libcore.rlib
+output/libstd_unicode.rlib: output/libcore.rlib
+output/libcollections.rlib: output/libcore.rlib output/liballoc.rlib output/libstd_unicode.rlib
+output/librand.rlib: output/libcore.rlib
+output/liblibc.rlib: output/libcore.rlib
+output/libcompiler_builtins.rlib: output/libcore.rlib
+output/libstd.rlib: $(call fcn_extcrate, core collections rand libc unwind compiler_builtins alloc_system panic_unwind)
+output/libunwind.rlib: $(call fcn_extcrate, core libc)
diff --git a/rust_src.patch b/rust_src.patch
new file mode 100644
index 00000000..ac479382
--- /dev/null
+++ b/rust_src.patch
@@ -0,0 +1,26 @@
+--- rustc-nightly/src/libcore/intrinsics.rs
++++ rustc-nightly/src/libcore/intrinsics.rs
+@@ -643,5 +643,9 @@
+ pub fn drop_in_place<T: ?Sized>(to_drop: *mut T);
+
++ /// Obtain the length of a slice pointer
++ #[cfg(rust_compiler="mrustc")]
++ pub fn mrustc_slice_len<T>(pointer: *const [T]) -> usize;
++
+ /// Gets a static string slice containing the name of a type.
+ pub fn type_name<T: ?Sized>() -> &'static str;
+
+--- rustc-nightly/src/libcore/slice.rs
++++ rustc-nightly/src/libcore/slice.rs
+@@ -340,6 +340,8 @@
+ #[inline]
+ fn len(&self) -> usize {
+- unsafe {
+- mem::transmute::<&[T], Repr<T>>(self).len
+- }
++ #[cfg(not(rust_compiler="mrustc"))]
++ let rv = unsafe { mem::transmute::<&[T], Repr<T>>(self).len };
++ #[cfg(rust_compiler="mrustc")]
++ let rv = unsafe { ::intrinsics::mrustc_slice_len(self) };
++ rv
+ }
diff --git a/samples/test/for_underscore_drop.rs b/samples/test/for_underscore_drop.rs
new file mode 100644
index 00000000..e590fee0
--- /dev/null
+++ b/samples/test/for_underscore_drop.rs
@@ -0,0 +1,18 @@
+
+struct DropFlag<'a>(&'a mut i32);
+impl<'a> ::std::ops::Drop for DropFlag<'a>
+{
+ fn drop(&mut self) {
+ *self.0 += 1;
+ }
+}
+
+#[test]
+fn values_in_for_loop_dropped()
+{
+ let mut foo = 0;
+ for _ in Some(DropFlag(&mut foo))
+ {
+ }
+ assert_eq!(foo, 1);
+}
diff --git a/samples/test/scoping_rules.rs b/samples/test/scoping_rules.rs
new file mode 100644
index 00000000..9878fd16
--- /dev/null
+++ b/samples/test/scoping_rules.rs
@@ -0,0 +1,21 @@
+
+struct DropFlag<'a>(&'a ::std::cell::Cell<i32>);
+impl<'a> ::std::ops::Drop for DropFlag<'a>
+{
+ fn drop(&mut self) {
+ self.0.set( self.0.get() + 1 );
+ }
+}
+
+// Any temporaries defined in the expression part of a statement (i.e. in the yielded part of a
+// block) are stored in the parent scope.
+#[test]
+fn temporaries_in_yielded_expr()
+{
+ let drop_count = ::std::cell::Cell::new(0);
+ let _foo = ({ DropFlag(&drop_count).0 }, assert_eq!(drop_count.get(), 0) );
+ drop(_foo);
+ assert_eq!(drop_count.get(), 1);
+}
+
+
diff --git a/src/ast/crate.hpp b/src/ast/crate.hpp
index b74012a3..f9594a83 100644
--- a/src/ast/crate.hpp
+++ b/src/ast/crate.hpp
@@ -10,6 +10,23 @@ namespace AST {
class ExternCrate;
+class TestDesc
+{
+public:
+ ::AST::Path path;
+ ::std::string name;
+ bool ignore = false;
+ bool is_benchmark = false;
+
+ enum class ShouldPanic {
+ No,
+ Yes,
+ YesWithMessage,
+ } panic_type = ShouldPanic::No;
+
+ ::std::string expected_panic_message;
+};
+
class Crate
{
public:
@@ -22,6 +39,10 @@ public:
// Mapping filled by searching for (?visible) macros with is_pub=true
::std::map< ::std::string, const MacroRules*> m_exported_macros;
+ // List of tests (populated in expand if --test is passed)
+ bool m_test_harness = false;
+ ::std::vector<TestDesc> m_tests;
+
enum class Type {
Unknown,
RustLib,
diff --git a/src/ast/expr.cpp b/src/ast/expr.cpp
index 65048183..dd586683 100644
--- a/src/ast/expr.cpp
+++ b/src/ast/expr.cpp
@@ -85,9 +85,7 @@ NODE(ExprNode_Block, {
::std::vector<ExprNodeP> nodes;
for(const auto& n : m_nodes)
nodes.push_back( n->clone() );
- if( m_local_mod )
- TODO(get_pos(), "Handle cloning ExprNode_Block with a module");
- return NEWNODE(ExprNode_Block, m_is_unsafe, m_yields_final_value, mv$(nodes), nullptr);
+ return NEWNODE(ExprNode_Block, m_is_unsafe, m_yields_final_value, mv$(nodes), m_local_mod);
})
NODE(ExprNode_Macro, {
@@ -266,7 +264,16 @@ NODE(ExprNode_ByteString, {
})
NODE(ExprNode_Closure, {
- os << "/* todo: closure */";
+ if( m_is_move )
+ os << "move ";
+ os << "|";
+ for(const auto& a : m_args)
+ {
+ os << a.first << ": " << a.second << ",";
+ }
+ os << "|";
+ os << "->" << m_return;
+ os << " " << *m_code;
},{
ExprNode_Closure::args_t args;
for(const auto& a : m_args) {
@@ -276,7 +283,16 @@ NODE(ExprNode_Closure, {
});
NODE(ExprNode_StructLiteral, {
- os << "/* todo: sl */";
+ os << m_path << " { ";
+ for(const auto& v : m_values)
+ {
+ os << v.first << ": " << *v.second << ", ";
+ }
+ if(m_base_value)
+ {
+ os << ".." << *m_base_value;
+ }
+ os << "}";
},{
ExprNode_StructLiteral::t_values vals;
diff --git a/src/common.hpp b/src/common.hpp
index 76940d25..55e0f833 100644
--- a/src/common.hpp
+++ b/src/common.hpp
@@ -72,26 +72,56 @@ static inline Ordering ord(bool l, bool r)
else
return OrdLess;
}
+static inline Ordering ord(char l, char r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+
+static inline Ordering ord(unsigned char l, unsigned char r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+static inline Ordering ord(unsigned short l, unsigned short r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
static inline Ordering ord(unsigned l, unsigned r)
{
- if(l == r)
- return OrdEqual;
- else if( l > r )
- return OrdGreater;
- else
- return OrdLess;
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
}
-#if UINTPTR_MAX != UINT_MAX
-static inline Ordering ord(::std::uintptr_t l, ::std::uintptr_t r)
+static inline Ordering ord(unsigned long l, unsigned long r)
{
- if(l == r)
- return OrdEqual;
- else if( l > r )
- return OrdGreater;
- else
- return OrdLess;
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
}
-#endif
+static inline Ordering ord(unsigned long long l, unsigned long long r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+static inline Ordering ord(signed char l, signed char r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+static inline Ordering ord(short l, short r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+static inline Ordering ord(long l, long r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+static inline Ordering ord(long long l, long long r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+static inline Ordering ord(float l, float r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+static inline Ordering ord(double l, double r)
+{
+ return (l == r ? OrdEqual : (l > r ? OrdGreater : OrdLess));
+}
+
static inline Ordering ord(const ::std::string& l, const ::std::string& r)
{
if(l == r)
@@ -302,19 +332,8 @@ struct FmtEscaped {
FmtEscaped(const ::std::string& s):
s(s.c_str())
{}
- friend ::std::ostream& operator<<(::std::ostream& os, const FmtEscaped& x) {
- for(auto s = x.s; *s != '\0'; s ++)
- {
- switch(*s)
- {
- case '\n': os << "\\n"; break;
- case '\\': os << "\\\\"; break;
- case '"': os << "\\\""; break;
- default: os << *s; break;
- }
- }
- return os;
- }
+ // See main.cpp
+ friend ::std::ostream& operator<<(::std::ostream& os, const FmtEscaped& x);
};
// -------------------------------------------------------------------
@@ -333,4 +352,52 @@ auto end (reversion_wrapper<T> w) { return w.iterable.rend(); }
template <typename T>
reversion_wrapper<T> reverse (T&& iterable) { return { iterable }; }
+
+template<typename T>
+struct RunIterable {
+ const ::std::vector<T>& list;
+ unsigned int ofs;
+ ::std::pair<size_t,size_t> cur;
+ RunIterable(const ::std::vector<T>& list):
+ list(list), ofs(0)
+ {
+ advance();
+ }
+ void advance() {
+ if( ofs < list.size() )
+ {
+ auto start = ofs;
+ while(ofs < list.size() && list[ofs] == list[start])
+ ofs ++;
+ cur = ::std::make_pair(start, ofs-1);
+ }
+ else
+ {
+ ofs = list.size()+1;
+ }
+ }
+ RunIterable<T> begin() { return *this; }
+ RunIterable<T> end() { auto rv = *this; rv.ofs = list.size()+1; return rv; }
+ bool operator==(const RunIterable<T>& x) {
+ return x.ofs == ofs;
+ }
+ bool operator!=(const RunIterable<T>& x) {
+ return !(*this == x);
+ }
+ void operator++() {
+ advance();
+ }
+ const ::std::pair<size_t,size_t>& operator*() const {
+ return this->cur;
+ }
+ const ::std::pair<size_t,size_t>* operator->() const {
+ return &this->cur;
+ }
+};
+template<typename T>
+RunIterable<T> runs(const ::std::vector<T>& x) {
+ return RunIterable<T>(x);
+}
+
+
#endif
diff --git a/src/expand/derive.cpp b/src/expand/derive.cpp
index 58eb1f96..c363b689 100644
--- a/src/expand/derive.cpp
+++ b/src/expand/derive.cpp
@@ -369,43 +369,56 @@ public:
pat_a = AST::Pattern(AST::Pattern::TagValue(), AST::Pattern::Value::make_Named(base_path + v.m_name));
),
(Tuple,
- // TODO: Complete this.
::std::vector<AST::Pattern> pats_a;
- //::std::vector<AST::ExprNodeP> nodes;
+ AST::ExprNodeP node;
+
+ node = NEWNODE(NamedValue, AST::Path("f"));
+ node = NEWNODE(CallMethod,
+ mv$(node), AST::PathNode("debug_tuple",{}),
+ vec$( NEWNODE(String, v.m_name) )
+ );
for( unsigned int idx = 0; idx < e.m_sub_types.size(); idx ++ )
{
auto name_a = FMT("a" << idx);
pats_a.push_back( ::AST::Pattern(::AST::Pattern::TagBind(), name_a, ::AST::PatternBinding::Type::REF) );
- //nodes.push_back( this->assert_is_eq(assert_method_path, NEWNODE(NamedValue, AST::Path(name_a))) );
- }
- //code = NEWNODE(Block, mv$(nodes));
- code = NEWNODE(CallMethod,
- NEWNODE(NamedValue, AST::Path("f")),
- AST::PathNode("write_str",{}),
- vec$( NEWNODE(String, v.m_name + "(...)") )
- );
+ node = NEWNODE(CallMethod,
+ mv$(node), AST::PathNode("field",{}),
+ vec$(
+ NEWNODE(NamedValue, AST::Path(name_a))
+ )
+ );
+ }
+ code = NEWNODE(CallMethod, mv$(node), AST::PathNode("finish",{}), {});
pat_a = AST::Pattern(AST::Pattern::TagNamedTuple(), base_path + v.m_name, mv$(pats_a));
),
(Struct,
::std::vector< ::std::pair<std::string, AST::Pattern> > pats_a;
- //::std::vector<AST::ExprNodeP> nodes;
+ AST::ExprNodeP node;
+
+ node = NEWNODE(NamedValue, AST::Path("f"));
+ node = NEWNODE(CallMethod,
+ mv$(node), AST::PathNode("debug_struct",{}),
+ vec$( NEWNODE(String, v.m_name) )
+ );
for( const auto& fld : e.m_fields )
{
auto name_a = FMT("a" << fld.m_name);
pats_a.push_back( ::std::make_pair(fld.m_name, ::AST::Pattern(::AST::Pattern::TagBind(), name_a, ::AST::PatternBinding::Type::REF)) );
- //nodes.push_back( this->assert_is_eq(assert_method_path, NEWNODE(NamedValue, AST::Path(name_a))) );
+
+ node = NEWNODE(CallMethod,
+ mv$(node), AST::PathNode("field",{}),
+ vec$(
+ NEWNODE(String, fld.m_name),
+ NEWNODE(NamedValue, AST::Path(name_a))
+ )
+ );
}
- //code = NEWNODE(Block, mv$(nodes) );
- code = NEWNODE(CallMethod,
- NEWNODE(NamedValue, AST::Path("f")),
- AST::PathNode("write_str",{}),
- vec$( NEWNODE(String, v.m_name + "{...}") )
- );
+ code = NEWNODE(CallMethod, mv$(node), AST::PathNode("finish",{}), {});
pat_a = AST::Pattern(AST::Pattern::TagStruct(), base_path + v.m_name, mv$(pats_a), true);
)
)
diff --git a/src/expand/file_line.cpp b/src/expand/file_line.cpp
index a485fe61..8dfb7e6d 100644
--- a/src/expand/file_line.cpp
+++ b/src/expand/file_line.cpp
@@ -27,6 +27,15 @@ class CExpanderLine:
}
};
+class CExpanderColumn:
+ public ExpandProcMacro
+{
+ ::std::unique_ptr<TokenStream> expand(const Span& sp, const AST::Crate& crate, const ::std::string& ident, const TokenTree& tt, AST::Module& mod) override
+ {
+ return box$( TTStreamO(TokenTree(Token((uint64_t)sp.start_ofs, CORETYPE_U32))) );
+ }
+};
+
class CExpanderModulePath:
public ExpandProcMacro
{
@@ -44,5 +53,6 @@ class CExpanderModulePath:
STATIC_MACRO("file", CExpanderFile);
STATIC_MACRO("line", CExpanderLine);
+STATIC_MACRO("column", CExpanderColumn);
STATIC_MACRO("module_path", CExpanderModulePath);
diff --git a/src/expand/format_args.cpp b/src/expand/format_args.cpp
index deda6968..92afcbb2 100644
--- a/src/expand/format_args.cpp
+++ b/src/expand/format_args.cpp
@@ -140,6 +140,11 @@ namespace {
if( *s != '{' )
{
if( *s == '}' ) {
+ s ++;
+ if( *s != '}' ) {
+ // TODO: Error? Warning?
+ s --; // Step backwards, just in case
+ }
// Doesn't need escaping
cur_literal += '}';
}
@@ -247,6 +252,7 @@ namespace {
if( *s == '0' ) {
args.zero_pad = true;
+ args.align_char = '0';
s ++;
}
else {
@@ -407,6 +413,24 @@ namespace {
toks.push_back( Token(TOK_IDENT, ent) );
}
}
+ void push_toks(::std::vector<TokenTree>& toks, Token t1) {
+ toks.push_back( mv$(t1) );
+ }
+ void push_toks(::std::vector<TokenTree>& toks, Token t1, Token t2) {
+ toks.push_back( mv$(t1) );
+ toks.push_back( mv$(t2) );
+ }
+ //void push_toks(::std::vector<TokenTree>& toks, Token t1, Token t2, Token t3) {
+ // toks.push_back( mv$(t1) );
+ // toks.push_back( mv$(t2) );
+ // toks.push_back( mv$(t3) );
+ //}
+ void push_toks(::std::vector<TokenTree>& toks, Token t1, Token t2, Token t3, Token t4) {
+ toks.push_back( mv$(t1) );
+ toks.push_back( mv$(t2) );
+ toks.push_back( mv$(t3) );
+ toks.push_back( mv$(t4) );
+ }
}
class CFormatArgsExpander:
@@ -576,10 +600,12 @@ class CFormatArgsExpander:
}
else // if(is_simple)
{
+ // 1. Generate a set of arguments+formatters
+ // > Each combination of argument index and fragment type needs a unique entry in the `args` array
+
// Use new_v1_formatted
// - requires creating more entries in the `args` list to cover multiple formatters for one value
- //push_path(toks, crate, {"fmt", "Arguments", "new_v1_formatted"});
- push_path(toks, crate, {"fmt", "Arguments", "new_v1"});
+ push_path(toks, crate, {"fmt", "Arguments", "new_v1_formatted"});
// (
toks.push_back( TokenTree(TOK_PAREN_OPEN) );
{
@@ -587,14 +613,100 @@ class CFormatArgsExpander:
toks.push_back( Token(TOK_IDENT, "FRAGMENTS") );
toks.push_back( TokenTree(TOK_COMMA) );
- // 1. Generate a set of arguments+formatters
-
// TODO: Fragments to format
// - The format stored by mrustc doesn't quite work with how rustc (and fmt::rt::v1) works
toks.push_back( TokenTree(TOK_AMP) );
toks.push_back( TokenTree(TOK_SQUARE_OPEN) );
- //for(const auto& frag : fragments ) {
- //}
+ for(const auto& frag : fragments )
+ {
+ push_path(toks, crate, {"fmt", "ArgumentV1", "new"});
+ toks.push_back( Token(TOK_PAREN_OPEN) );
+ toks.push_back( Token(TOK_IDENT, FMT("a" << frag.arg_index)) );
+
+ toks.push_back( TokenTree(TOK_COMMA) );
+
+ push_path(toks, crate, {"fmt", frag.trait_name, "fmt"});
+ toks.push_back( TokenTree(TOK_PAREN_CLOSE) );
+ toks.push_back( TokenTree(TOK_COMMA) );
+ }
+ toks.push_back( TokenTree(TOK_SQUARE_CLOSE) );
+ toks.push_back( TokenTree(TOK_COMMA) );
+
+ toks.push_back( TokenTree(TOK_AMP) );
+ toks.push_back( TokenTree(TOK_SQUARE_OPEN) );
+ for(const auto& frag : fragments)
+ {
+ push_path(toks, crate, {"fmt", "rt", "v1", "Argument"});
+ toks.push_back( TokenTree(TOK_BRACE_OPEN) );
+
+ push_toks(toks, Token(TOK_IDENT, "position"), TOK_COLON );
+ push_path(toks, crate, {"fmt", "rt", "v1", "Position", "Next"});
+ push_toks(toks, TOK_COMMA);
+
+ push_toks(toks, Token(TOK_IDENT, "format"), TOK_COLON );
+ push_path(toks, crate, {"fmt", "rt", "v1", "FormatSpec"});
+ toks.push_back( TokenTree(TOK_BRACE_OPEN) );
+ {
+ push_toks(toks, Token(TOK_IDENT, "fill"), TOK_COLON, Token(uint64_t(frag.args.align_char), CORETYPE_CHAR), TOK_COMMA );
+
+ push_toks(toks, Token(TOK_IDENT, "align"), TOK_COLON);
+ const char* align_var_name = nullptr;
+ switch( frag.args.align )
+ {
+ case FmtArgs::Align::Unspec: align_var_name = "Unknown"; break;
+ case FmtArgs::Align::Left: align_var_name = "Left"; break;
+ case FmtArgs::Align::Center: align_var_name = "Center"; break;
+ case FmtArgs::Align::Right: align_var_name = "Right"; break;
+ }
+ push_path(toks, crate, {"fmt", "rt", "v1", "Alignment", align_var_name});
+ push_toks(toks, TOK_COMMA);
+
+ push_toks(toks, Token(TOK_IDENT, "flags"), TOK_COLON);
+ uint64_t flags = 0;
+ if(frag.args.alternate)
+ flags |= 1 << 2;
+ push_toks(toks, Token(uint64_t(flags), CORETYPE_U32));
+ push_toks(toks, TOK_COMMA);
+
+ push_toks(toks, Token(TOK_IDENT, "precision"), TOK_COLON );
+ if( frag.args.prec_is_arg || frag.args.prec != 0 ) {
+ push_path(toks, crate, {"fmt", "rt", "v1", "Count", "Is"});
+ push_toks(toks, TOK_PAREN_OPEN);
+ if( frag.args.prec_is_arg ) {
+ push_toks(toks, TOK_STAR, Token(TOK_IDENT, FMT("a" << frag.args.prec)) );
+ }
+ else {
+ push_toks(toks, Token(uint64_t(frag.args.prec), CORETYPE_UINT) );
+ }
+ toks.push_back( TokenTree(TOK_PAREN_CLOSE) );
+ }
+ else {
+ push_path(toks, crate, {"fmt", "rt", "v1", "Count", "Implied"});
+ }
+ toks.push_back( TokenTree(TOK_COMMA) );
+
+ push_toks(toks, Token(TOK_IDENT, "width"), TOK_COLON );
+ if( frag.args.width_is_arg || frag.args.width != 0 ) {
+ push_path(toks, crate, {"fmt", "rt", "v1", "Count", "Is"});
+ push_toks(toks, TOK_PAREN_OPEN);
+ if( frag.args.width_is_arg ) {
+ push_toks(toks, TOK_STAR, Token(TOK_IDENT, FMT("a" << frag.args.width)) );
+ }
+ else {
+ push_toks(toks, Token(uint64_t(frag.args.width), CORETYPE_UINT) );
+ }
+ toks.push_back( TokenTree(TOK_PAREN_CLOSE) );
+ }
+ else {
+ push_path(toks, crate, {"fmt", "rt", "v1", "Count", "Implied"});
+ }
+ toks.push_back( TokenTree(TOK_COMMA) );
+ }
+ toks.push_back( TokenTree(TOK_BRACE_CLOSE) );
+
+ toks.push_back( TokenTree(TOK_BRACE_CLOSE) );
+ toks.push_back( TokenTree(TOK_COMMA) );
+ }
toks.push_back( TokenTree(TOK_SQUARE_CLOSE) );
}
// )
diff --git a/src/expand/lang_item.cpp b/src/expand/lang_item.cpp
index 0a5c87f5..988dc0e0 100644
--- a/src/expand/lang_item.cpp
+++ b/src/expand/lang_item.cpp
@@ -187,7 +187,10 @@ public:
AttrStage stage() const override { return AttrStage::Post; }
void handle(const Span& sp, const AST::MetaItem& attr, AST::Crate& crate, const AST::Path& path, AST::Module& mod, AST::Item& i) const override
{
- TU_IFLET(::AST::Item, i, Function, e,
+ if( i.is_None() ) {
+ // Ignore.
+ }
+ else TU_IFLET(::AST::Item, i, Function, e,
auto rv = crate.m_lang_items.insert(::std::make_pair( ::std::string("mrustc-main"), ::AST::Path(path) ));
if( !rv.second )
{
diff --git a/src/expand/mod.cpp b/src/expand/mod.cpp
index f152b1ab..788b2109 100644
--- a/src/expand/mod.cpp
+++ b/src/expand/mod.cpp
@@ -67,7 +67,7 @@ void Expand_Attrs(/*const */::AST::MetaItems& attrs, AttrStage stage, ::std::fu
}
void Expand_Attrs(::AST::MetaItems& attrs, AttrStage stage, ::AST::Crate& crate, const ::AST::Path& path, ::AST::Module& mod, ::AST::Item& item)
{
- Expand_Attrs(attrs, stage, [&](const auto& sp, const auto& d, const auto& a){ d.handle(sp, a, crate, path, mod, item); });
+ Expand_Attrs(attrs, stage, [&](const auto& sp, const auto& d, const auto& a){ if(!item.is_None()) d.handle(sp, a, crate, path, mod, item); });
}
void Expand_Attrs(::AST::MetaItems& attrs, AttrStage stage, ::AST::Crate& crate, ::AST::Module& mod, ::AST::ImplDef& impl)
{
@@ -109,7 +109,8 @@ void Expand_Attrs(::AST::MetaItems& attrs, AttrStage stage, ::AST::Crate& crate
return e;
}
}
- // TODO: Shouldn't this use the _last_ located macro? Allowing later (local) defininitions to override it?
+ // Find the last macro of this name (allows later #[macro_use] definitions to override)
+ const MacroRules* last_mac = nullptr;
for( const auto& mri : mac_mod.macro_imports_res() )
{
//DEBUG("- " << mri.name);
@@ -118,10 +119,14 @@ void Expand_Attrs(::AST::MetaItems& attrs, AttrStage stage, ::AST::Crate& crate
if( input_ident != "" )
ERROR(mi_span, E0000, "macro_rules! macros can't take an ident");
- auto e = Macro_Invoke(name.c_str(), *mri.data, mv$(input_tt), mod);
- return e;
+ last_mac = mri.data;
}
}
+ if( last_mac )
+ {
+ auto e = Macro_Invoke(name.c_str(), *last_mac, mv$(input_tt), mod);
+ return e;
+ }
}
// Error - Unknown macro name
@@ -307,11 +312,9 @@ struct CExpandExpr:
assert( ! this->replacement );
}
void visit_vector(::std::vector< ::std::unique_ptr<AST::ExprNode> >& cnodes) {
- for( auto& child : cnodes ) {
- this->visit(child);
- }
- // Delete null children
for( auto it = cnodes.begin(); it != cnodes.end(); ) {
+ assert( it->get() );
+ this->visit(*it);
if( it->get() == nullptr ) {
it = cnodes.erase( it );
}
@@ -321,53 +324,94 @@ struct CExpandExpr:
}
}
- void visit(::AST::ExprNode_Macro& node) override
+ ::AST::ExprNodeP visit_macro(::AST::ExprNode_Macro& node, ::std::vector< ::AST::ExprNodeP>* nodes_out)
{
- TRACE_FUNCTION_F("ExprNode_Macro - name = " << node.m_name);
+ TRACE_FUNCTION_F(node.m_name << "!");
if( node.m_name == "" ) {
- return ;
+ return ::AST::ExprNodeP();
}
+ ::AST::ExprNodeP rv;
auto& mod = this->cur_mod();
- auto ttl = Expand_Macro(
- crate, modstack, mod,
- Span(node.get_pos()),
- node.m_name, node.m_ident, node.m_tokens
- );
- if( ttl.get() != nullptr )
+ auto ttl = Expand_Macro( crate, modstack, mod, Span(node.get_pos()), node.m_name, node.m_ident, node.m_tokens );
+ if( !ttl.get() )
+ {
+ // No expansion
+ }
+ else
{
- if( ttl->lookahead(0) != TOK_EOF )
+ while( ttl->lookahead(0) != TOK_EOF )
{
SET_MODULE( (*ttl), mod );
+
// Reparse as expression / item
bool add_silence_if_end = false;
::std::shared_ptr< AST::Module> tmp_local_mod;
auto& local_mod_ptr = (this->current_block ? this->current_block->m_local_mod : tmp_local_mod);
- DEBUG("-- Parsing as expression line (legacy)");
+ DEBUG("-- Parsing as expression line");
auto newexpr = Parse_ExprBlockLine_WithItems(*ttl, local_mod_ptr, add_silence_if_end);
+
+ if( tmp_local_mod )
+ TODO(node.get_pos(), "Handle edge case where a macro expansion outside of a _Block creates an item");
+
if( newexpr )
{
- // TODO: use add_silence_if_end - Applies if this node is the last node in the block.
-
- // Then call visit on it again
- DEBUG("--- Visiting new node");
- this->visit(newexpr);
- // And schedule it to replace the previous
- replacement = mv$(newexpr);
+ if( nodes_out ) {
+ nodes_out->push_back( mv$(newexpr) );
+ }
+ else {
+ assert( !rv );
+ rv = mv$(newexpr);
+ }
}
else
{
- // TODO: Delete this node somehow? Or just leave it for later.
+ // Expansion line just added a new item
}
- ASSERT_BUG(node.get_pos(), !tmp_local_mod, "TODO: Handle edge case where a macro expansion outside of a _Block creates an item");
+
+ if( ttl->lookahead(0) != TOK_EOF )
+ {
+ if( !nodes_out ) {
+ ERROR(node.get_pos(), E0000, "Unused tokens at the end of macro expansion - " << ttl->getToken());
+ }
+ }
+ }
+ }
+
+ node.m_name = "";
+ return mv$(rv);
+ }
+
+ void visit(::AST::ExprNode_Macro& node) override
+ {
+ TRACE_FUNCTION_F("ExprNode_Macro - name = " << node.m_name);
+ if( node.m_name == "" ) {
+ return ;
+ }
+
+ replacement = this->visit_macro(node, nullptr);
+
+ if( this->replacement )
+ {
+ DEBUG("--- Visiting new node");
+ auto n = mv$(this->replacement);
+ this->visit(n);
+ if( n )
+ {
+ assert( !this->replacement );
+ this->replacement = mv$(n);
}
- DEBUG("ExprNode_Macro - replacement = " << replacement.get());
- node.m_name = "";
}
}
void visit(::AST::ExprNode_Block& node) override {
unsigned int mod_item_count = 0;
+
+ auto prev_modstack = this->modstack;
+ if( node.m_local_mod ) {
+ this->modstack = LList<const ::AST::Module*>(&prev_modstack, node.m_local_mod.get());
+ }
+
// TODO: macro_rules! invocations within the expression list influence this.
// > Solution: Defer creation of the local module until during expand.
if( node.m_local_mod ) {
@@ -377,13 +421,48 @@ struct CExpandExpr:
auto saved = this->current_block;
this->current_block = &node;
- this->visit_vector(node.m_nodes);
+
+ for( auto it = node.m_nodes.begin(); it != node.m_nodes.end(); )
+ {
+ assert( it->get() );
+
+ if( auto* node_mac = dynamic_cast<::AST::ExprNode_Macro*>(it->get()) )
+ {
+ Expand_Attrs((*it)->attrs(), AttrStage::Pre, [&](const auto& sp, const auto& d, const auto& a){ d.handle(sp, a, this->crate, *it); });
+ if( !it->get() ) {
+ it = node.m_nodes.erase( it );
+ continue ;
+ }
+
+ assert(it->get() == node_mac);
+
+ ::std::vector< ::AST::ExprNodeP> new_nodes;
+ this->visit_macro(*node_mac, &new_nodes);
+
+ it = node.m_nodes.erase(it);
+ it = node.m_nodes.insert(it, ::std::make_move_iterator(new_nodes.begin()), ::std::make_move_iterator(new_nodes.end()));
+ // NOTE: Doesn't advance the iterator above, we want to re-visit the new node
+ }
+ else
+ {
+ this->visit(*it);
+ if( it->get() == nullptr ) {
+ it = node.m_nodes.erase( it );
+ }
+ else {
+ ++ it;
+ }
+ }
+ }
+
this->current_block = saved;
// HACK! Run Expand_Mod twice on local modules.
if( node.m_local_mod ) {
Expand_Mod(crate, modstack, node.m_local_mod->path(), *node.m_local_mod, mod_item_count);
}
+
+ this->modstack = mv$(prev_modstack);
}
void visit(::AST::ExprNode_Asm& node) override {
for(auto& v : node.m_output)
@@ -611,11 +690,13 @@ struct CExpandExpr:
void Expand_Expr(::AST::Crate& crate, LList<const AST::Module*> modstack, ::std::unique_ptr<AST::ExprNode>& node)
{
+ TRACE_FUNCTION_F("unique_ptr");
auto visitor = CExpandExpr(crate, modstack);
visitor.visit(node);
}
void Expand_Expr(::AST::Crate& crate, LList<const AST::Module*> modstack, ::std::shared_ptr<AST::ExprNode>& node)
{
+ TRACE_FUNCTION_F("shared_ptr");
auto visitor = CExpandExpr(crate, modstack);
node->visit(visitor);
if( visitor.replacement ) {
@@ -624,6 +705,7 @@ void Expand_Expr(::AST::Crate& crate, LList<const AST::Module*> modstack, ::std:
}
void Expand_Expr(::AST::Crate& crate, LList<const AST::Module*> modstack, AST::Expr& node)
{
+ TRACE_FUNCTION_F("AST::Expr");
auto visitor = CExpandExpr(crate, modstack);
node.visit_nodes(visitor);
if( visitor.replacement ) {
@@ -750,7 +832,7 @@ void Expand_Mod(::AST::Crate& crate, LList<const AST::Module*> modstack, ::AST::
{
auto& i = mod.items()[idx];
- DEBUG("- " << i.name << " (" << ::AST::Item::tag_to_str(i.data.tag()) << ") :: " << i.data.attrs);
+ DEBUG("- " << modpath << "::" << i.name << " (" << ::AST::Item::tag_to_str(i.data.tag()) << ") :: " << i.data.attrs);
::AST::Path path = modpath + i.name;
auto attrs = mv$(i.data.attrs);
diff --git a/src/expand/stringify.cpp b/src/expand/stringify.cpp
index f43d896f..cbb5c65c 100644
--- a/src/expand/stringify.cpp
+++ b/src/expand/stringify.cpp
@@ -20,8 +20,9 @@ class CExpander:
auto lex = TTStream(tt);
while( GET_TOK(tok, lex) != TOK_EOF )
{
+ if(!rv.empty())
+ rv += " ";
rv += tok.to_str();
- rv += " ";
}
return box$( TTStreamO(TokenTree(Token(TOK_STRING, mv$(rv)))) );
diff --git a/src/expand/test.cpp b/src/expand/test.cpp
index fba6556f..12bfbb7d 100644
--- a/src/expand/test.cpp
+++ b/src/expand/test.cpp
@@ -7,21 +7,95 @@
*/
#include <synext_decorator.hpp>
#include <ast/ast.hpp>
+#include <ast/crate.hpp>
class CTestHandler:
public ExpandDecorator
{
- AttrStage stage() const override { return AttrStage::Pre; }
+ AttrStage stage() const override { return AttrStage::Post; }
void handle(const Span& sp, const AST::MetaItem& mi, ::AST::Crate& crate, const AST::Path& path, AST::Module& mod, AST::Item&i) const override {
if( ! i.is_Function() ) {
ERROR(sp, E0000, "#[test] can only be put on functions - found on " << i.tag_str());
}
- // TODO: Proper #[test] support, for now just remove them
- i = AST::Item::make_None({});
+ if( crate.m_test_harness )
+ {
+ ::AST::TestDesc td;
+ for(const auto& node : path.nodes())
+ {
+ td.name += "::";
+ td.name += node.name();
+ }
+ td.path = ::AST::Path(path);
+
+ crate.m_tests.push_back( mv$(td) );
+ }
+ else
+ {
+ i = AST::Item::make_None({});
+ }
+ }
+};
+class CTestHandler_SP:
+ public ExpandDecorator
+{
+ AttrStage stage() const override { return AttrStage::Pre; }
+
+ void handle(const Span& sp, const AST::MetaItem& mi, ::AST::Crate& crate, const AST::Path& path, AST::Module& mod, AST::Item&i) const override {
+ if( ! i.is_Function() ) {
+ ERROR(sp, E0000, "#[should_panic] can only be put on functions - found on " << i.tag_str());
+ }
+
+ if( crate.m_test_harness )
+ {
+ for(auto& td : crate.m_tests)
+ {
+ if( td.path != path )
+ continue ;
+
+ if( mi.has_sub_items() )
+ {
+ td.panic_type = ::AST::TestDesc::ShouldPanic::YesWithMessage;
+ // TODO: Check that name is correct and that it is a string
+ td.expected_panic_message = mi.items().at(0).string();
+ }
+ else
+ {
+ td.panic_type = ::AST::TestDesc::ShouldPanic::Yes;
+ }
+ return ;
+ }
+ //ERROR()
+ }
+ }
+};
+class CTestHandler_Ignore:
+ public ExpandDecorator
+{
+ AttrStage stage() const override { return AttrStage::Pre; }
+
+ void handle(const Span& sp, const AST::MetaItem& mi, ::AST::Crate& crate, const AST::Path& path, AST::Module& mod, AST::Item&i) const override {
+ if( ! i.is_Function() ) {
+ ERROR(sp, E0000, "#[ignore] can only be put on functions - found on " << i.tag_str());
+ }
+
+ if( crate.m_test_harness )
+ {
+ for(auto& td : crate.m_tests)
+ {
+ if( td.path != path )
+ continue ;
+
+ td.ignore = true;
+ return ;
+ }
+ //ERROR()
+ }
}
};
STATIC_DECORATOR("test", CTestHandler);
+STATIC_DECORATOR("should_panic", CTestHandler_SP);
+STATIC_DECORATOR("ignore", CTestHandler_Ignore);
diff --git a/src/expand/test_harness.cpp b/src/expand/test_harness.cpp
new file mode 100644
index 00000000..12d32121
--- /dev/null
+++ b/src/expand/test_harness.cpp
@@ -0,0 +1,120 @@
+/*
+ * MRustC - Rust Compiler
+ * - By John Hodge (Mutabah/thePowersGang)
+ *
+ * expand/test_harness.cpp
+ * - Generation of the test harness main function (when --test is passed)
+ */
+#include <ast/ast.hpp>
+#include <ast/expr.hpp>
+#include <ast/crate.hpp>
+#include <main_bindings.hpp>
+#include <hir/hir.hpp> // ABI_RUST
+
+#define NEWNODE(_ty, ...) ::AST::ExprNodeP(new ::AST::ExprNode##_ty(__VA_ARGS__))
+
+void Expand_TestHarness(::AST::Crate& crate)
+{
+ // Create the following module:
+ // ```
+ // mod `#test` {
+ // extern crate std;
+ // extern crate test;
+ // fn main() {
+ // self::test::test_main_static(&::`#test`::TESTS);
+ // }
+ // static TESTS: [test::TestDescAndFn; _] = [
+ // test::TestDescAndFn { desc: test::TestDesc { name: "foo", ignore: false, should_panic: test::ShouldPanic::No }, testfn: ::path::to::foo },
+ // ];
+ // }
+ // ```
+
+ // ---- main function ----
+ auto main_fn = ::AST::Function { Span(), {}, ABI_RUST, false, false, false, TypeRef(TypeRef::TagUnit(), Span()), {} };
+ {
+ auto call_node = NEWNODE(_CallPath,
+ ::AST::Path("test", { ::AST::PathNode("test_main_static") }),
+ ::make_vec1(
+ NEWNODE(_UniOp, ::AST::ExprNode_UniOp::REF,
+ NEWNODE(_NamedValue, ::AST::Path("", { ::AST::PathNode("test#"), ::AST::PathNode("TESTS") }))
+ )
+ )
+ );
+ main_fn.set_code( mv$(call_node) );
+ }
+
+
+ // ---- test list ----
+ ::std::vector< ::AST::ExprNodeP> test_nodes;
+
+ for(const auto& test : crate.m_tests)
+ {
+ // HACK: Don't emit should_panic tests
+ if( test.panic_type != ::AST::TestDesc::ShouldPanic::No )
+ continue ;
+
+ ::AST::ExprNode_StructLiteral::t_values desc_vals;
+ // `name: "foo",`
+ desc_vals.push_back( ::std::make_pair("name", NEWNODE(_CallPath,
+ ::AST::Path("test", { ::AST::PathNode("StaticTestName") }),
+ ::make_vec1( NEWNODE(_String, test.name) )
+ ) ));
+ // `ignore: false,`
+ desc_vals.push_back( ::std::make_pair("ignore", NEWNODE(_Bool, test.ignore)) );
+ // `should_panic: ShouldPanic::No,`
+ {
+ ::AST::ExprNodeP should_panic_val;
+ switch(test.panic_type)
+ {
+ case ::AST::TestDesc::ShouldPanic::No:
+ should_panic_val = NEWNODE(_NamedValue, ::AST::Path("test", { ::AST::PathNode("ShouldPanic"), ::AST::PathNode("No") }));
+ break;
+ case ::AST::TestDesc::ShouldPanic::Yes:
+ should_panic_val = NEWNODE(_NamedValue, ::AST::Path("test", { ::AST::PathNode("ShouldPanic"), ::AST::PathNode("Yes") }));
+ break;
+ case ::AST::TestDesc::ShouldPanic::YesWithMessage:
+ should_panic_val = NEWNODE(_CallPath,
+ ::AST::Path("test", { ::AST::PathNode("ShouldPanic"), ::AST::PathNode("YesWithMessage") }),
+ make_vec1( NEWNODE(_String, test.expected_panic_message) )
+ );
+ break;
+ }
+ desc_vals.push_back( ::std::make_pair("should_panic", mv$(should_panic_val)) );
+ }
+ auto desc_expr = NEWNODE(_StructLiteral, ::AST::Path("test", { ::AST::PathNode("TestDesc")}), nullptr, mv$(desc_vals));
+
+ ::AST::ExprNode_StructLiteral::t_values descandfn_vals;
+ descandfn_vals.push_back( ::std::make_pair(::std::string("desc"), mv$(desc_expr)) );
+
+ auto test_type_var_name = test.is_benchmark ? "StaticBenchFn" : "StaticTestFn";
+ descandfn_vals.push_back( ::std::make_pair(::std::string("testfn"), NEWNODE(_CallPath,
+ ::AST::Path("test", { ::AST::PathNode(test_type_var_name) }),
+ ::make_vec1( NEWNODE(_NamedValue, AST::Path(test.path)) )
+ ) ) );
+
+ test_nodes.push_back( NEWNODE(_StructLiteral, ::AST::Path("test", { ::AST::PathNode("TestDescAndFn")}), nullptr, mv$(descandfn_vals) ) );
+ }
+ auto* tests_array = new ::AST::ExprNode_Array(mv$(test_nodes));
+
+ size_t test_count = tests_array->m_values.size();
+ auto tests_list = ::AST::Static { ::AST::Static::Class::STATIC,
+ TypeRef(TypeRef::TagSizedArray(), Span(),
+ TypeRef(Span(), ::AST::Path("test", { ::AST::PathNode("TestDescAndFn") })),
+ ::std::shared_ptr<::AST::ExprNode>( new ::AST::ExprNode_Integer(test_count, CORETYPE_UINT) )
+ ),
+ ::AST::Expr( mv$(tests_array) )
+ };
+
+ // ---- module ----
+ auto newmod = ::AST::Module { ::AST::Path("", { ::AST::PathNode("test#") }) };
+ // - TODO: These need to be loaded too.
+ // > They don't actually need to exist here, just be loaded (and use absolute paths)
+ newmod.add_ext_crate(false, "std", "std", {});
+ newmod.add_ext_crate(false, "test", "test", {});
+
+ newmod.add_item(false, "main", mv$(main_fn), {});
+ newmod.add_item(false, "TESTS", mv$(tests_list), {});
+
+ crate.m_root_module.add_item(false, "test#", mv$(newmod), {});
+ crate.m_lang_items["mrustc-main"] = ::AST::Path("", { AST::PathNode("test#"), AST::PathNode("main") });
+}
diff --git a/src/hir/deserialise.cpp b/src/hir/deserialise.cpp
index c08f886d..0ff4d63b 100644
--- a/src/hir/deserialise.cpp
+++ b/src/hir/deserialise.cpp
@@ -586,6 +586,7 @@ namespace {
m.dst_type = static_cast< ::HIR::TraitMarkings::DstType>( m_in.read_tag() );
m.coerce_unsized_index = m_in.read_count( );
m.unsized_field = m_in.read_count( );
+ m.unsized_param = m_in.read_count();
// TODO: auto_impls
return m;
}
@@ -1009,6 +1010,11 @@ namespace {
sdf.other = static_cast<unsigned int>(m_in.read_count());
return ::MIR::Statement::make_SetDropFlag(sdf);
}
+ case 4:
+ return ::MIR::Statement::make_ScopeEnd({
+ deserialise_vec<unsigned int>(),
+ deserialise_vec<unsigned int>()
+ });
default:
::std::cerr << "Bad tag for a MIR Statement" << ::std::endl;
throw "";
diff --git a/src/hir/dump.cpp b/src/hir/dump.cpp
index f00efcd5..8bcffa4e 100644
--- a/src/hir/dump.cpp
+++ b/src/hir/dump.cpp
@@ -165,7 +165,13 @@ namespace {
(Unit,
),
(Value,
- m_os << " = ?";// <<
+ m_os << " = ";
+ if( e.val.is_Invalid() ) {
+ m_os << "?";
+ }
+ else {
+ m_os << e.val;
+ }
),
(Tuple,
m_os << "(";
@@ -296,28 +302,28 @@ namespace {
void visit(::HIR::ExprNode_Block& node) override
{
if( node.m_nodes.size() == 0 ) {
- m_os << "{ }";
+ m_os << "{";
+ if( node.m_value_node )
+ {
+ m_os << " ";
+ this->visit_node_ptr(node.m_value_node);
+ }
+ m_os << " }";
}
- //else if( node.m_nodes.size() == 1) {
- // m_os << "{ ";
- // this->visit_node_ptr(node.m_nodes.front());
- // m_os << " }";
- //}
else {
m_os << "{\n";
inc_indent();
for(auto& sn : node.m_nodes) {
+ m_os << "\n";
m_os << indent();
this->visit_node_ptr(sn);
- if( &sn != &node.m_nodes.back() ) {
- m_os << ";\n";
- }
- else if( !node.m_yields_final ) {
- m_os << ";\n";
- }
- else {
- m_os << "\n";
- }
+ m_os << ";\n";
+ }
+ if( node.m_value_node )
+ {
+ m_os << indent();
+ this->visit_node_ptr(node.m_value_node);
+ m_os << "\n";
}
dec_indent();
m_os << indent() << "}";
@@ -418,7 +424,7 @@ namespace {
void visit(::HIR::ExprNode_Assign& node) override
{
this->visit_node_ptr(node.m_slot);
- m_os << " = ";
+ m_os << " " << ::HIR::ExprNode_Assign::opname(node.m_op) << "= ";
this->visit_node_ptr(node.m_value);
}
void visit(::HIR::ExprNode_BinOp& node) override
diff --git a/src/hir/expr.cpp b/src/hir/expr.cpp
index e19e7df6..53f85c42 100644
--- a/src/hir/expr.cpp
+++ b/src/hir/expr.cpp
@@ -23,6 +23,8 @@ DEF_VISIT(ExprNode_Block, node,
for(auto& subnode : node.m_nodes) {
visit_node_ptr(subnode);
}
+ if( node.m_value_node )
+ visit_node_ptr(node.m_value_node);
)
DEF_VISIT(ExprNode_Asm, node,
for(auto& v : node.m_outputs)
diff --git a/src/hir/expr.hpp b/src/hir/expr.hpp
index 81e89b28..3c37a930 100644
--- a/src/hir/expr.hpp
+++ b/src/hir/expr.hpp
@@ -68,21 +68,20 @@ struct ExprNode_Block:
{
bool m_is_unsafe;
::std::vector< ExprNodeP > m_nodes;
- bool m_yields_final;
+ ExprNodeP m_value_node; // can be null
::HIR::SimplePath m_local_mod;
t_trait_list m_traits;
ExprNode_Block(Span sp):
ExprNode(mv$(sp)),
- m_is_unsafe(false),
- m_yields_final(false)
+ m_is_unsafe(false)
{}
- ExprNode_Block(Span sp, bool is_unsafe, ::std::vector<ExprNodeP> nodes):
+ ExprNode_Block(Span sp, bool is_unsafe, ::std::vector<ExprNodeP> nodes, ExprNodeP value_node):
ExprNode( mv$(sp) ),
m_is_unsafe(is_unsafe),
m_nodes( mv$(nodes) ),
- m_yields_final(false)
+ m_value_node( mv$(value_node) )
{}
NODE_METHODS();
diff --git a/src/hir/from_ast_expr.cpp b/src/hir/from_ast_expr.cpp
index 25f6eade..cd2b27fa 100644
--- a/src/hir/from_ast_expr.cpp
+++ b/src/hir/from_ast_expr.cpp
@@ -33,7 +33,11 @@ struct LowerHIR_ExprNode_Visitor:
ASSERT_BUG(v.get_pos(), n, "NULL node encountered in block");
rv->m_nodes.push_back( LowerHIR_ExprNode_Inner( *n ) );
}
- rv->m_yields_final = v.m_yields_final_value;
+ if( v.m_yields_final_value && ! rv->m_nodes.empty() )
+ {
+ rv->m_value_node = mv$(rv->m_nodes.back());
+ rv->m_nodes.pop_back();
+ }
if( v.m_local_mod )
{
@@ -89,8 +93,8 @@ struct LowerHIR_ExprNode_Visitor:
case ::AST::ExprNode_Assign::ADD: return ::HIR::ExprNode_Assign::Op::Add;
case ::AST::ExprNode_Assign::SUB: return ::HIR::ExprNode_Assign::Op::Sub;
- case ::AST::ExprNode_Assign::DIV: return ::HIR::ExprNode_Assign::Op::Mul;
- case ::AST::ExprNode_Assign::MUL: return ::HIR::ExprNode_Assign::Op::Div;
+ case ::AST::ExprNode_Assign::MUL: return ::HIR::ExprNode_Assign::Op::Mul;
+ case ::AST::ExprNode_Assign::DIV: return ::HIR::ExprNode_Assign::Op::Div;
case ::AST::ExprNode_Assign::MOD: return ::HIR::ExprNode_Assign::Op::Mod;
case ::AST::ExprNode_Assign::AND: return ::HIR::ExprNode_Assign::Op::And;
@@ -328,7 +332,7 @@ struct LowerHIR_ExprNode_Visitor:
m_rv.reset( new ::HIR::ExprNode_Loop( v.span(),
v.m_label,
- ::HIR::ExprNodeP(new ::HIR::ExprNode_Block( v.span(), false, mv$(code)))
+ ::HIR::ExprNodeP(new ::HIR::ExprNode_Block( v.span(), false, mv$(code), {} ))
) );
break; }
case ::AST::ExprNode_Loop::WHILELET: {
diff --git a/src/hir/hir.cpp b/src/hir/hir.cpp
index 9d1fcf97..d50c3e49 100644
--- a/src/hir/hir.cpp
+++ b/src/hir/hir.cpp
@@ -93,6 +93,30 @@ const ::HIR::Enum::Variant* ::HIR::Enum::get_variant(const ::std::string& name)
return nullptr;
return &it->second;
}
+bool HIR::Enum::is_value() const
+{
+ return this->m_repr != ::HIR::Enum::Repr::Rust || ::std::all_of(m_variants.begin(), m_variants.end(), [](const auto& x){return x.second.is_Unit() || x.second.is_Value();});
+}
+uint32_t HIR::Enum::get_value(size_t idx) const
+{
+ assert(idx < m_variants.size());
+
+ if( const auto* e = m_variants[idx].second.opt_Value() )
+ {
+ return e->val.as_Integer();
+ }
+
+ uint32_t val = 0;
+ for(size_t i = 0; i < idx; i ++)
+ {
+ if( const auto* e = m_variants[i].second.opt_Value() )
+ {
+ val = e->val.as_Integer();
+ }
+ val ++;
+ }
+ return val;
+}
namespace {
bool matches_genericpath(const ::HIR::GenericParams& params, const ::HIR::GenericPath& left, const ::HIR::GenericPath& right, ::HIR::t_cb_resolve_type ty_res, bool expand_generic);
diff --git a/src/hir/hir.hpp b/src/hir/hir.hpp
index 2f6b37df..304bdd64 100644
--- a/src/hir/hir.hpp
+++ b/src/hir/hir.hpp
@@ -178,6 +178,7 @@ struct TraitMarkings
TraitObject, // (Trait)
} dst_type;
unsigned int unsized_field = ~0u;
+ unsigned int unsized_param = ~0u;
/// `true` if there is a Copy impl
bool is_copy = false;
@@ -219,6 +220,11 @@ public:
TraitMarkings m_markings;
const Variant* get_variant(const ::std::string& ) const;
+
+ /// Returns true if this enum is a C-like enum (has values only)
+ bool is_value() const;
+ /// Returns the value for the given variant
+ uint32_t get_value(size_t variant) const;
};
class Struct
{
diff --git a/src/hir/serialise.cpp b/src/hir/serialise.cpp
index 81319c2e..77e17dba 100644
--- a/src/hir/serialise.cpp
+++ b/src/hir/serialise.cpp
@@ -495,6 +495,11 @@ namespace {
m_out.write_count(e.idx);
m_out.write_bool(e.new_val);
m_out.write_count(e.other);
+ ),
+ (ScopeEnd,
+ m_out.write_tag(4);
+ serialise_vec(e.vars);
+ serialise_vec(e.tmps);
)
)
}
@@ -859,6 +864,7 @@ namespace {
m_out.write_tag( static_cast<unsigned int>(m.dst_type) );
m_out.write_count( m.coerce_unsized_index );
m_out.write_count( m.unsized_field );
+ m_out.write_count( m.unsized_param );
// TODO: auto_impls
}
diff --git a/src/hir/type.cpp b/src/hir/type.cpp
index dd655ccc..7dd8dc80 100644
--- a/src/hir/type.cpp
+++ b/src/hir/type.cpp
@@ -606,7 +606,26 @@ bool ::HIR::TypeRef::match_test_generics(const Span& sp, const ::HIR::TypeRef& x
return Compare::Unequal;
}
TU_MATCH(::HIR::TypeRef::Data, (v.m_data, x.m_data), (te, xe),
- (Infer, throw "";),
+ (Infer,
+ // Both sides are infer
+ switch(te.ty_class)
+ {
+ case ::HIR::InferClass::None:
+ case ::HIR::InferClass::Diverge:
+ return Compare::Fuzzy;
+ default:
+ switch(xe.ty_class)
+ {
+ case ::HIR::InferClass::None:
+ case ::HIR::InferClass::Diverge:
+ return Compare::Fuzzy;
+ default:
+ if( te.ty_class != xe.ty_class )
+ return Compare::Unequal;
+ return Compare::Fuzzy;
+ }
+ }
+ ),
(Generic, throw "";),
(Primitive,
return (te == xe ? Compare::Equal : Compare::Unequal);
diff --git a/src/hir_conv/constant_evaluation.cpp b/src/hir_conv/constant_evaluation.cpp
index 1ebdbb11..a950a3dd 100644
--- a/src/hir_conv/constant_evaluation.cpp
+++ b/src/hir_conv/constant_evaluation.cpp
@@ -276,6 +276,10 @@ namespace {
{
e->visit(*this);
}
+ if( node.m_value_node )
+ node.m_value_node->visit(*this);
+ else
+ ;
}
void visit(::HIR::ExprNode_Asm& node) override {
badnode(node);
diff --git a/src/hir_conv/markings.cpp b/src/hir_conv/markings.cpp
index a294f47b..4550bef5 100644
--- a/src/hir_conv/markings.cpp
+++ b/src/hir_conv/markings.cpp
@@ -43,6 +43,33 @@ public:
{
str.m_markings.unsized_field = (str.m_data.is_Tuple() ? str.m_data.as_Tuple().size()-1 : str.m_data.as_Named().size()-1);
}
+
+ // Rules:
+ // - A type parameter must be ?Sized
+ // - That type parameter must only be used as part of the last field, and only once
+ // - If the final field isn't the parameter, it must also impl Unsize
+
+ // HACK: Just determine what ?Sized parameter is controlling the sized-ness
+ if( str.m_markings.dst_type == ::HIR::TraitMarkings::DstType::Possible )
+ {
+ auto& last_field_ty = (str.m_data.is_Tuple() ? str.m_data.as_Tuple().back().ent : str.m_data.as_Named().back().second.ent);
+ auto ty = ::HIR::TypeRef("", 0);
+ for(size_t i = 0; i < str.m_params.m_types.size(); i++)
+ {
+ const auto& param = str.m_params.m_types[i];
+ auto ty = ::HIR::TypeRef(param.m_name, i);
+ if( !param.m_is_sized )
+ {
+ if( visit_ty_with(last_field_ty, [&](const auto& t){ return t == ty; }) )
+ {
+ assert(str.m_markings.unsized_param == ~0u);
+ str.m_markings.unsized_param = i;
+ }
+ }
+ }
+ ASSERT_BUG(Span(), str.m_markings.unsized_param != ~0u, "No unsized param for type " << ip);
+ str.m_markings.can_unsize = true;
+ }
}
void visit_trait(::HIR::ItemPath ip, ::HIR::Trait& tr) override
@@ -212,6 +239,7 @@ public:
::HIR::TraitMarkings::DstType get_field_dst_type(const ::HIR::TypeRef& ty, const ::HIR::GenericParams& inner_def, const ::HIR::GenericParams& params_def, const ::HIR::PathParams* params)
{
+ TRACE_FUNCTION_F("ty=" << ty);
// If the type is generic, and the pointed-to parameters is ?Sized, record as needing unsize
if( const auto* te = ty.m_data.opt_Generic() )
{
@@ -242,7 +270,7 @@ public:
// If the type is a struct, check it (recursively)
if( ! te->path.m_data.is_Generic() ) {
// Associated type, TODO: Check this better.
- return ::HIR::TraitMarkings::DstType::Possible;
+ return ::HIR::TraitMarkings::DstType::None;
}
else if( te->binding.is_Struct() ) {
const auto& params_tpl = te->path.m_data.as_Generic().m_params;
diff --git a/src/hir_expand/annotate_value_usage.cpp b/src/hir_expand/annotate_value_usage.cpp
index da4c897f..dbcd6b13 100644
--- a/src/hir_expand/annotate_value_usage.cpp
+++ b/src/hir_expand/annotate_value_usage.cpp
@@ -84,6 +84,8 @@ namespace {
for( auto& subnode : node.m_nodes ) {
this->visit_node_ptr(subnode);
}
+ if( node.m_value_node )
+ this->visit_node_ptr(node.m_value_node);
}
void visit(::HIR::ExprNode_Asm& node) override
@@ -463,6 +465,7 @@ namespace {
return get_usage_for_pattern(sp, *pe.sub, *ty.m_data.as_Borrow().inner);
),
(Tuple,
+ ASSERT_BUG(sp, ty.m_data.is_Tuple(), "Tuple pattern with non-tuple type - " << ty);
const auto& subtys = ty.m_data.as_Tuple();
assert(pe.sub_patterns.size() == subtys.size());
auto rv = ::HIR::ValueUsage::Borrow;
@@ -471,6 +474,7 @@ namespace {
return rv;
),
(SplitTuple,
+ ASSERT_BUG(sp, ty.m_data.is_Tuple(), "SplitTuple pattern with non-tuple type - " << ty);
const auto& subtys = ty.m_data.as_Tuple();
assert(pe.leading.size() + pe.trailing.size() < subtys.size());
auto rv = ::HIR::ValueUsage::Borrow;
@@ -486,6 +490,7 @@ namespace {
(StructTuple,
// TODO: Avoid monomorphising all the time.
const auto& str = *pe.binding;
+ ASSERT_BUG(sp, str.m_data.is_Tuple(), "StructTuple pattern with non-tuple struct - " << str.m_data.tag_str());
const auto& flds = str.m_data.as_Tuple();
assert(pe.sub_patterns.size() == flds.size());
auto monomorph_cb = monomorphise_type_get_cb(sp, nullptr, &pe.path.m_params, nullptr);
diff --git a/src/hir_expand/reborrow.cpp b/src/hir_expand/reborrow.cpp
index 688dcf97..b6f6aeb4 100644
--- a/src/hir_expand/reborrow.cpp
+++ b/src/hir_expand/reborrow.cpp
@@ -73,8 +73,8 @@ namespace {
// Recurse into blocks - Neater this way
else if( auto p = dynamic_cast< ::HIR::ExprNode_Block*>(node_ptr.get()) )
{
- auto& last_node = p->m_nodes.back();
- last_node = do_reborrow(mv$(last_node));
+ ASSERT_BUG( node_ptr->span(), p->m_value_node, "reborrow into block that doesn't yield" );
+ p->m_value_node = do_reborrow(mv$(p->m_value_node));
}
else
{
diff --git a/src/hir_expand/ufcs_everything.cpp b/src/hir_expand/ufcs_everything.cpp
index 60d503aa..23b9c288 100644
--- a/src/hir_expand/ufcs_everything.cpp
+++ b/src/hir_expand/ufcs_everything.cpp
@@ -410,7 +410,7 @@ namespace {
break;
case ::HIR::ExprNode_BinOp::Op::Shr: langitem = method = "shr"; if(0)
- case ::HIR::ExprNode_BinOp::Op::Shl: langitem = method = "shr";
+ case ::HIR::ExprNode_BinOp::Op::Shl: langitem = method = "shl";
if( is_op_valid_shift(ty_l, ty_r) ) {
return ;
}
@@ -611,6 +611,7 @@ namespace {
m_replacement = NEWNODE( mv$(node.m_res_type), Deref, sp, mv$(m_replacement) );
}
+#if 0
void visit(::HIR::ExprNode_Deref& node) override
{
const auto& sp = node.span();
@@ -689,6 +690,7 @@ namespace {
// - Dereference the result (which is an &-ptr)
m_replacement = NEWNODE( mv$(node.m_res_type), Deref, sp, mv$(m_replacement) );
}
+#endif
diff --git a/src/hir_typeck/expr_check.cpp b/src/hir_typeck/expr_check.cpp
index 6cbb61f4..1b4317a6 100644
--- a/src/hir_typeck/expr_check.cpp
+++ b/src/hir_typeck/expr_check.cpp
@@ -59,9 +59,10 @@ namespace {
{
n->visit(*this);
}
- if( node.m_nodes.size() > 0 && node.m_yields_final )
+ if( node.m_value_node )
{
- check_types_equal(node.span(), node.m_res_type, node.m_nodes.back()->m_res_type);
+ node.m_value_node->visit(*this);
+ check_types_equal(node.span(), node.m_res_type, node.m_value_node->m_res_type);
}
}
void visit(::HIR::ExprNode_Asm& node) override
diff --git a/src/hir_typeck/expr_cs.cpp b/src/hir_typeck/expr_cs.cpp
index 66baba4d..2388d078 100644
--- a/src/hir_typeck/expr_cs.cpp
+++ b/src/hir_typeck/expr_cs.cpp
@@ -543,13 +543,12 @@ namespace {
return ty.m_data.is_Diverge();// || (ty.m_data.is_Infer() && ty.m_data.as_Infer().ty_class == ::HIR::InferClass::Diverge);
};
+ bool diverges = false;
+ this->push_traits( node.m_traits );
if( node.m_nodes.size() > 0 )
{
- bool diverges = false;
- this->push_traits( node.m_traits );
-
this->push_inner_coerce(false);
- for( unsigned int i = 0; i < node.m_nodes.size()-1; i ++ )
+ for( unsigned int i = 0; i < node.m_nodes.size(); i ++ )
{
auto& snp = node.m_nodes[i];
this->context.add_ivars( snp->m_res_type );
@@ -561,62 +560,56 @@ namespace {
}
}
this->pop_inner_coerce();
+ }
- if( node.m_yields_final )
- {
- auto& snp = node.m_nodes.back();
- DEBUG("Block yields final value");
- this->context.add_ivars( snp->m_res_type );
- this->context.equate_types(snp->span(), node.m_res_type, snp->m_res_type);
- snp->visit(*this);
- }
- else
+ if( node.m_value_node )
+ {
+ auto& snp = node.m_value_node;
+ DEBUG("Block yields final value");
+ this->context.add_ivars( snp->m_res_type );
+ this->context.equate_types(snp->span(), node.m_res_type, snp->m_res_type);
+ snp->visit(*this);
+ }
+ else if( node.m_nodes.size() > 0 )
+ {
+ // NOTE: If the final statement in the block diverges, mark this as diverging
+ const auto& snp = node.m_nodes.back();
+ bool defer = false;
+ if( !diverges )
{
- auto& snp = node.m_nodes.back();
- this->context.add_ivars( snp->m_res_type );
- // - Not yielded - so don't equate the return
- snp->visit(*this);
-
- // NOTE: If the final statement in the block diverges, mark this as diverging
- bool defer = false;
- if( !diverges )
- {
- TU_IFLET(::HIR::TypeRef::Data, this->context.get_type(snp->m_res_type).m_data, Infer, e,
- switch(e.ty_class)
- {
- case ::HIR::InferClass::Integer:
- case ::HIR::InferClass::Float:
- diverges = false;
- break;
- default:
- defer = true;
- break;
- }
- )
- else if( is_diverge(snp->m_res_type) ) {
- diverges = true;
- }
- else {
+ TU_IFLET(::HIR::TypeRef::Data, this->context.get_type(snp->m_res_type).m_data, Infer, e,
+ switch(e.ty_class)
+ {
+ case ::HIR::InferClass::Integer:
+ case ::HIR::InferClass::Float:
diverges = false;
+ break;
+ default:
+ defer = true;
+ break;
}
- }
-
- // If a statement in this block diverges
- if( defer ) {
- DEBUG("Block final node returns _, derfer diverge check");
- this->context.add_revisit(node);
- }
- else if( diverges ) {
- DEBUG("Block diverges, yield !");
- this->context.equate_types(node.span(), node.m_res_type, ::HIR::TypeRef::new_diverge());
+ )
+ else if( is_diverge(snp->m_res_type) ) {
+ diverges = true;
}
else {
- DEBUG("Block doesn't diverge but doesn't yield a value, yield ()");
- this->context.equate_types(node.span(), node.m_res_type, ::HIR::TypeRef::new_unit());
+ diverges = false;
}
}
- this->pop_traits( node.m_traits );
+ // If a statement in this block diverges
+ if( defer ) {
+ DEBUG("Block final node returns _, defer diverge check");
+ this->context.add_revisit(node);
+ }
+ else if( diverges ) {
+ DEBUG("Block diverges, yield !");
+ this->context.equate_types(node.span(), node.m_res_type, ::HIR::TypeRef::new_diverge());
+ }
+ else {
+ DEBUG("Block doesn't diverge but doesn't yield a value, yield ()");
+ this->context.equate_types(node.span(), node.m_res_type, ::HIR::TypeRef::new_unit());
+ }
}
else
{
@@ -624,6 +617,7 @@ namespace {
DEBUG("Block is empty, yield ()");
this->context.equate_types(node.span(), node.m_res_type, ::HIR::TypeRef::new_unit());
}
+ this->pop_traits( node.m_traits );
}
void visit(::HIR::ExprNode_Asm& node) override
{
@@ -1199,7 +1193,7 @@ namespace {
{
const auto& name = val.first;
auto it = ::std::find_if(fields.begin(), fields.end(), [&](const auto& v)->bool{ return v.first == name; });
- assert(it != fields.end());
+ ASSERT_BUG(node.span(), it != fields.end(), "Field '" << name << "' not found in struct " << node.m_path);
const auto& des_ty_r = it->second.ent;
auto& des_ty_cache = node.m_value_types[it - fields.begin()];
const auto* des_ty = &des_ty_r;
@@ -1899,6 +1893,7 @@ namespace {
return ty.m_data.is_Diverge();// || (ty.m_data.is_Infer() && ty.m_data.as_Infer().ty_class == ::HIR::InferClass::Diverge);
};
+ assert( !node.m_nodes.empty() );
const auto& last_ty = this->context.get_type( node.m_nodes.back()->m_res_type );
DEBUG("_Block: last_ty = " << last_ty);
@@ -2915,11 +2910,11 @@ namespace {
void visit(::HIR::ExprNode_Literal& node) override {
TU_MATCH(::HIR::ExprNode_Literal::Data, (node.m_data), (e),
(Integer,
- ASSERT_BUG(node.span(), node.m_res_type.m_data.is_Primitive(), "Float Literal didn't return primitive");
+ ASSERT_BUG(node.span(), node.m_res_type.m_data.is_Primitive(), "Integer _Literal didn't return primitive - " << node.m_res_type);
e.m_type = node.m_res_type.m_data.as_Primitive();
),
(Float,
- ASSERT_BUG(node.span(), node.m_res_type.m_data.is_Primitive(), "Float Literal didn't return primitive");
+ ASSERT_BUG(node.span(), node.m_res_type.m_data.is_Primitive(), "Float Literal didn't return primitive - " << node.m_res_type);
e.m_type = node.m_res_type.m_data.as_Primitive();
),
(Boolean,
@@ -4057,11 +4052,12 @@ namespace {
while( auto* p = dynamic_cast< ::HIR::ExprNode_Block*>(&**node_ptr_ptr) )
{
DEBUG("- Moving into block");
- ASSERT_BUG( p->span(), context.m_ivars.types_equal(p->m_res_type, p->m_nodes.back()->m_res_type),
- "Block and result mismatch - " << context.m_ivars.fmt_type(p->m_res_type) << " != " << context.m_ivars.fmt_type(p->m_nodes.back()->m_res_type));
+ assert( p->m_value_node );
+ ASSERT_BUG( p->span(), context.m_ivars.types_equal(p->m_res_type, p->m_value_node->m_res_type),
+ "Block and result mismatch - " << context.m_ivars.fmt_type(p->m_res_type) << " != " << context.m_ivars.fmt_type(p->m_value_node->m_res_type));
// - Override the the result type to the desired result
p->m_res_type = ::HIR::TypeRef::new_borrow(borrow_type, des_borrow_inner.clone());
- node_ptr_ptr = &p->m_nodes.back();
+ node_ptr_ptr = &p->m_value_node;
}
#endif
auto& node_ptr = *node_ptr_ptr;
@@ -4187,38 +4183,40 @@ namespace {
// Deref coercions
// - If right can be dereferenced to left
+ DEBUG("-- Deref coercions");
{
::HIR::TypeRef tmp_ty;
- const ::HIR::TypeRef* out_ty = &ty_src;
+ const ::HIR::TypeRef* out_ty_p = &ty_src;
unsigned int count = 0;
::std::vector< ::HIR::TypeRef> types;
- while( (out_ty = context.m_resolve.autoderef(sp, *out_ty, tmp_ty)) )
+ while( (out_ty_p = context.m_resolve.autoderef(sp, *out_ty_p, tmp_ty)) )
{
+ const auto& out_ty = context.m_ivars.get_type(*out_ty_p);
count += 1;
- if( out_ty->m_data.is_Infer() && out_ty->m_data.as_Infer().ty_class == ::HIR::InferClass::None ) {
+ if( out_ty.m_data.is_Infer() && out_ty.m_data.as_Infer().ty_class == ::HIR::InferClass::None ) {
// Hit a _, so can't keep going
break;
}
- types.push_back( out_ty->clone() );
+ types.push_back( out_ty.clone() );
- if( context.m_ivars.types_equal(ty_dst, *out_ty) == false ) {
+ if( context.m_ivars.types_equal(ty_dst, out_ty) == false ) {
// Check equivalence
- if( ty_dst.m_data.tag() == out_ty->m_data.tag() ) {
- TU_MATCH_DEF( ::HIR::TypeRef::Data, (ty_dst.m_data, out_ty->m_data), (d_e, s_e),
+ if( ty_dst.m_data.tag() == out_ty.m_data.tag() ) {
+ TU_MATCH_DEF( ::HIR::TypeRef::Data, (ty_dst.m_data, out_ty.m_data), (d_e, s_e),
(
- if( ty_dst .compare_with_placeholders(sp, *out_ty, context.m_ivars.callback_resolve_infer()) == ::HIR::Compare::Unequal ) {
+ if( ty_dst .compare_with_placeholders(sp, out_ty, context.m_ivars.callback_resolve_infer()) == ::HIR::Compare::Unequal ) {
DEBUG("Same tag, but not fuzzy match");
continue ;
}
- DEBUG("Same tag and fuzzy match - assuming " << ty_dst << " == " << *out_ty);
- context.equate_types(sp, ty_dst, *out_ty);
+ DEBUG("Same tag and fuzzy match - assuming " << ty_dst << " == " << out_ty);
+ context.equate_types(sp, ty_dst, out_ty);
),
(Slice,
// Equate!
- context.equate_types(sp, ty_dst, *out_ty);
+ context.equate_types(sp, ty_dst, out_ty);
// - Fall through
)
)
@@ -4338,15 +4336,13 @@ namespace {
// Search for Unsize
// - If `right`: ::core::marker::Unsize<`left`>
+ DEBUG("-- Unsize trait");
{
- const auto& lang_Unsize = context.m_crate.get_lang_item_path(sp, "unsize");
- ::HIR::PathParams pp;
- pp.m_types.push_back( ty_dst.clone() );
- bool found = context.m_resolve.find_trait_impls(sp, lang_Unsize, pp, ty_src, [&](auto impl, auto cmp) {
- // TODO: Allow fuzzy match if only match
- return cmp == ::HIR::Compare::Equal;
+ auto cmp = context.m_resolve.can_unsize(sp, ty_dst, ty_src, [&](auto new_dst) {
+ // Equate these two types
});
- if( found ) {
+ if(cmp == ::HIR::Compare::Equal)
+ {
DEBUG("- Unsize " << &*node_ptr << " -> " << ty_dst);
auto ty_dst_b = ::HIR::TypeRef::new_borrow(bt, ty_dst.clone());
auto ty_dst_b2 = ty_dst_b.clone();
@@ -4355,18 +4351,27 @@ namespace {
return true;
}
- }
-
- if( ty_dst.m_data.is_Path() && ty_dst.m_data.as_Path().binding.is_Unbound() )
- {
- }
- else if( ty_src.m_data.is_Path() && ty_src.m_data.as_Path().binding.is_Unbound() )
- {
- }
- else if( ty_dst.compare_with_placeholders(sp, ty_src, context.m_ivars.callback_resolve_infer()) != ::HIR::Compare::Unequal )
- {
- context.equate_types(sp, ty_dst, ty_src);
- return true;
+ if(cmp == ::HIR::Compare::Unequal)
+ {
+ // No unsize possible, equate types
+ // - Only if they're not already fixed as unequal (that gets handled elsewhere)
+ if( ty_dst.m_data.is_Path() && ty_dst.m_data.as_Path().binding.is_Unbound() )
+ {
+ }
+ else if( ty_src.m_data.is_Path() && ty_src.m_data.as_Path().binding.is_Unbound() )
+ {
+ }
+ else if( ty_dst.compare_with_placeholders(sp, ty_src, context.m_ivars.callback_resolve_infer()) != ::HIR::Compare::Unequal )
+ {
+ context.equate_types(sp, ty_dst, ty_src);
+ return true;
+ }
+ }
+ if(cmp == ::HIR::Compare::Fuzzy)
+ {
+ // Not sure yet
+ return false;
+ }
}
// Keep trying
@@ -4635,13 +4640,13 @@ namespace {
while( auto* p = dynamic_cast< ::HIR::ExprNode_Block*>(&**npp) )
{
DEBUG("- Propagate to the last node of a _Block");
- ASSERT_BUG( p->span(), context.m_ivars.types_equal(p->m_res_type, p->m_nodes.back()->m_res_type),
- "Block and result mismatch - " << context.m_ivars.fmt_type(p->m_res_type) << " != " << context.m_ivars.fmt_type(p->m_nodes.back()->m_res_type));
+ ASSERT_BUG( p->span(), context.m_ivars.types_equal(p->m_res_type, p->m_value_node->m_res_type),
+ "Block and result mismatch - " << context.m_ivars.fmt_type(p->m_res_type) << " != " << context.m_ivars.fmt_type(p->m_value_node->m_res_type));
ASSERT_BUG( p->span(), context.m_ivars.types_equal(p->m_res_type, ty_src),
"Block and result mismatch - " << context.m_ivars.fmt_type(p->m_res_type) << " != " << context.m_ivars.fmt_type(ty_src)
);
p->m_res_type = new_type.clone();
- npp = &p->m_nodes.back();
+ npp = &p->m_value_node;
}
::HIR::ExprNodeP& node_ptr = *npp;
@@ -4872,10 +4877,16 @@ namespace {
// - If either is an ivar, add the other as a possibility
TU_IFLET( ::HIR::TypeRef::Data, src_ty.m_data, Infer, se,
// TODO: Update for InferClass::Diverge ?
- if( se.ty_class != ::HIR::InferClass::None ) {
- context.equate_types(sp, dst_ty, src_ty);
- }
- else {
+ switch(se.ty_class)
+ {
+ case ::HIR::InferClass::Integer:
+ case ::HIR::InferClass::Float:
+ if( dst_ty.m_data.is_Primitive() ) {
+ context.equate_types(sp, dst_ty, src_ty);
+ }
+ break;
+ case ::HIR::InferClass::None:
+ case ::HIR::InferClass::Diverge:
TU_IFLET(::HIR::TypeRef::Data, dst_ty.m_data, Infer, de,
context.possible_equate_type_unsize_to(se.index, dst_ty);
context.possible_equate_type_unsize_from(de.index, src_ty);
@@ -4898,6 +4909,7 @@ namespace {
// No equivalence added
}
// - Fall through and search for the impl
+ DEBUG("- Unsize, no ivar equivalence");
}
if( v.trait == context.m_crate.get_lang_item_path(sp, "coerce_unsized") )
{
diff --git a/src/hir_typeck/helpers.cpp b/src/hir_typeck/helpers.cpp
index 1bf2cafd..8f7dd2f2 100644
--- a/src/hir_typeck/helpers.cpp
+++ b/src/hir_typeck/helpers.cpp
@@ -1020,6 +1020,41 @@ bool TraitResolution::iterate_bounds( ::std::function<bool(const ::HIR::GenericB
}
return false;
}
+bool TraitResolution::iterate_aty_bounds(const Span& sp, const ::HIR::Path::Data::Data_UfcsKnown& pe, ::std::function<bool(const ::HIR::TraitPath&)> cb) const
+{
+ const auto& trait_ref = m_crate.get_trait_by_path(sp, pe.trait.m_path);
+ ASSERT_BUG(sp, trait_ref.m_types.count( pe.item ) != 0, "Trait " << pe.trait.m_path << " doesn't contain an associated type " << pe.item);
+ const auto& aty_def = trait_ref.m_types.find(pe.item)->second;
+
+ for(const auto& bound : aty_def.m_trait_bounds)
+ {
+ if( cb(bound) )
+ return true;
+ }
+ // Search `<Self as Trait>::Name` bounds on the trait itself
+ for(const auto& bound : trait_ref.m_params.m_bounds)
+ {
+ if( ! bound.is_TraitBound() ) continue ;
+ const auto& be = bound.as_TraitBound();
+
+ if( ! be.type.m_data.is_Path() ) continue ;
+ if( ! be.type.m_data.as_Path().binding.is_Opaque() ) continue ;
+
+ const auto& be_type_pe = be.type.m_data.as_Path().path.m_data.as_UfcsKnown();
+ if( *be_type_pe.type != ::HIR::TypeRef("Self", 0xFFFF) )
+ continue ;
+ if( be_type_pe.trait.m_path != pe.trait.m_path )
+ continue ;
+ if( be_type_pe.item != pe.item )
+ continue ;
+
+ if( cb(be.trait) )
+ return true;
+ }
+
+ return false;
+}
+
bool TraitResolution::find_trait_impls(const Span& sp,
const ::HIR::SimplePath& trait, const ::HIR::PathParams& params,
const ::HIR::TypeRef& ty,
@@ -1075,66 +1110,26 @@ bool TraitResolution::find_trait_impls(const Span& sp,
}
// Magic Unsize impls to trait objects
- if( trait == lang_Unsize ) {
+ if( trait == lang_Unsize )
+ {
ASSERT_BUG(sp, params.m_types.size() == 1, "Unsize trait requires a single type param");
const auto& dst_ty = this->m_ivars.get_type(params.m_types[0]);
- TU_IFLET( ::HIR::TypeRef::Data, dst_ty.m_data, TraitObject, e,
- // Magic impl if T: ThisTrait
- bool good;
- ::HIR::TypeRef::Data::Data_TraitObject tmp_e;
- tmp_e.m_trait.m_path = e.m_trait.m_path.m_path;
-
- ::HIR::Compare total_cmp = ::HIR::Compare::Equal;
- if( e.m_trait.m_path.m_path == ::HIR::SimplePath() ) {
- ASSERT_BUG(sp, e.m_markers.size() > 0, "TraitObject with no traits - " << dst_ty);
- good = true;
- }
- else {
- good = find_trait_impls(sp, e.m_trait.m_path.m_path, e.m_trait.m_path.m_params, ty,
- [&](const auto impl, auto cmp){
- if( cmp == ::HIR::Compare::Unequal )
- return false;
- total_cmp &= cmp;
- tmp_e.m_trait.m_path.m_params = impl.get_trait_params();
- for(const auto& aty : e.m_trait.m_type_bounds) {
- auto atyv = impl.get_type(aty.first.c_str());
- if( atyv == ::HIR::TypeRef() )
- {
- // Get the trait from which this associated type comes.
- // Insert a UfcsKnown path for that
- auto p = ::HIR::Path( ty.clone(), e.m_trait.m_path.clone(), aty.first );
- // Run EAT
- atyv = this->expand_associated_types( sp, ::HIR::TypeRef::new_path( mv$(p), {} ) );
- }
- tmp_e.m_trait.m_type_bounds[aty.first] = mv$(atyv);
- }
- return true;
- });
- }
- auto cb = [&](const auto impl, auto cmp){
- if( cmp == ::HIR::Compare::Unequal )
- return false;
- total_cmp &= cmp;
- tmp_e.m_markers.back().m_params = impl.get_trait_params();
- return true;
- };
- for(const auto& marker : e.m_markers)
- {
- if(!good) break;
- tmp_e.m_markers.push_back( marker.m_path );
- good &= find_trait_impls(sp, marker.m_path, marker.m_params, ty, cb);
- }
- if( good ) {
- ::HIR::PathParams real_params { ::HIR::TypeRef( ::HIR::TypeRef::Data(mv$(tmp_e)) ) };
- return callback( ImplRef(type.clone(), mv$(real_params), {}), total_cmp );
- }
- else {
- return false;
- }
- )
+ if( find_trait_impls_bound(sp, trait, params, type, callback) )
+ return true;
- // [T;N] -> [T] is handled down with array indexing
+ bool rv = false;
+ auto cb = [&](auto new_dst) {
+ ::HIR::PathParams real_params { mv$(new_dst) };
+ rv = callback( ImplRef(type.clone(), mv$(real_params), {}), ::HIR::Compare::Fuzzy );
+ };
+ auto cmp = this->can_unsize(sp, dst_ty, type, cb);
+ if( cmp == ::HIR::Compare::Equal )
+ {
+ assert(!rv);
+ rv = callback( ImplRef(type.clone(), params.clone(), {}), ::HIR::Compare::Equal );
+ }
+ return rv;
}
// Magical CoerceUnsized impls for various types
@@ -1278,23 +1273,6 @@ bool TraitResolution::find_trait_impls(const Span& sp,
*/
return false;
}
-
- // Unsize impl for arrays
- if( trait == lang_Unsize )
- {
- ASSERT_BUG(sp, params.m_types.size() == 1, "");
- const auto& dst_ty = m_ivars.get_type( params.m_types[0] );
-
- TU_IFLET(::HIR::TypeRef::Data, dst_ty.m_data, Slice, e2,
- auto cmp = e.inner->compare_with_placeholders(sp, *e2.inner, m_ivars.callback_resolve_infer());
- if( cmp != ::HIR::Compare::Unequal ) {
- ::HIR::PathParams pp;
- // - <[`array_inner`]> so it can be matched with the param by the caller
- pp.m_types.push_back( ::HIR::TypeRef::new_slice(e.inner->clone()) );
- return callback( ImplRef(type.clone(), mv$(pp), {}), cmp );
- }
- )
- }
)
@@ -1344,44 +1322,6 @@ bool TraitResolution::find_trait_impls(const Span& sp,
return rv;
}
- // Trait objects can unsize to a subset of their traits.
- if( trait == lang_Unsize )
- {
- ASSERT_BUG(sp, params.m_types.size() == 1, "");
- const auto& dst_ty = m_ivars.get_type( params.m_types[0] );
- if( ! dst_ty.m_data.is_TraitObject() ) {
- // If the destination isn't a trait object, don't even bother
- return false;
- }
- const auto& e2 = dst_ty.m_data.as_TraitObject();
-
- auto cmp = ::HIR::Compare::Equal;
-
- // TODO: Fuzzy compare
- if( e2.m_trait != e.m_trait ) {
- return false;
- }
- // The destination must have a strict subset of marker traits.
- const auto& src_markers = e.m_markers;
- const auto& dst_markers = e2.m_markers;
- for(const auto& mt : dst_markers)
- {
- // TODO: Fuzzy match
- bool found = false;
- for(const auto& omt : src_markers) {
- if( omt == mt ) {
- found = true;
- break;
- }
- }
- if( !found ) {
- // Return early.
- return false;
- }
- }
-
- return callback( ImplRef(&type, &e.m_trait.m_path.m_params, &e.m_trait.m_type_bounds), cmp );
- }
)
TU_IFLET(::HIR::TypeRef::Data, type.m_data, ErasedType, e,
@@ -1438,15 +1378,9 @@ bool TraitResolution::find_trait_impls(const Span& sp,
ASSERT_BUG(sp, e.path.m_data.is_UfcsKnown(), "Opaque bound type wasn't UfcsKnown - " << type);
const auto& pe = e.path.m_data.as_UfcsKnown();
- // If this associated type has a bound of the desired trait, return it.
- const auto& trait_ref = m_crate.get_trait_by_path(sp, pe.trait.m_path);
- ASSERT_BUG(sp, trait_ref.m_types.count( pe.item ) != 0, "Trait " << pe.trait.m_path << " doesn't contain an associated type " << pe.item);
- const auto& aty_def = trait_ref.m_types.find(pe.item)->second;
-
auto monomorph_cb = monomorphise_type_get_cb(sp, &*pe.type, &pe.trait.m_params, nullptr, nullptr);
- for(const auto& bound : aty_def.m_trait_bounds)
- {
+ auto rv = this->iterate_aty_bounds(sp, pe, [&](const auto& bound) {
const auto& b_params = bound.m_path.m_params;
::HIR::PathParams params_mono_o;
const auto& b_params_mono = (monomorphise_pathparams_needed(b_params) ? params_mono_o = monomorphise_path_params_with(sp, b_params, monomorph_cb, false) : b_params);
@@ -1477,58 +1411,14 @@ bool TraitResolution::find_trait_impls(const Span& sp,
return cmp != ::HIR::Compare::Unequal && callback( ImplRef(i_ty.clone(), i_params.clone(), {}), cmp );
});
if( ret )
- return true;
- }
- // TODO: Search `<Self as Trait>::Name` bounds on the trait itself
- for(const auto& bound : trait_ref.m_params.m_bounds)
- {
- if( ! bound.is_TraitBound() ) continue ;
- const auto& be = bound.as_TraitBound();
-
- if( ! be.type.m_data.is_Path() ) continue ;
- if( ! be.type.m_data.as_Path().binding.is_Opaque() ) continue ;
-
- const auto& be_type_pe = be.type.m_data.as_Path().path.m_data.as_UfcsKnown();
- if( *be_type_pe.type != ::HIR::TypeRef("Self", 0xFFFF) )
- continue ;
- if( be_type_pe.trait.m_path != pe.trait.m_path )
- continue ;
- if( be_type_pe.item != pe.item )
- continue ;
-
- // TODO: Merge the below code with the code from the above loop.
- const auto& b_params = be.trait.m_path.m_params;
- ::HIR::PathParams params_mono_o;
- const auto& b_params_mono = (monomorphise_pathparams_needed(b_params) ? params_mono_o = monomorphise_path_params_with(sp, b_params, monomorph_cb, false) : b_params);
-
- if( be.trait.m_path.m_path == trait )
{
- auto cmp = this->compare_pp(sp, b_params_mono, params);
- if( cmp != ::HIR::Compare::Unequal )
- {
- if( &b_params_mono == &params_mono_o )
- {
- if( callback( ImplRef(type.clone(), mv$(params_mono_o), {}), cmp ) )
- return true;
- params_mono_o = monomorphise_path_params_with(sp, b_params, monomorph_cb, false);
- }
- else
- {
- if( callback( ImplRef(&type, &b_params, &null_assoc), cmp ) )
- return true;
- }
- }
- }
-
- bool ret = this->find_named_trait_in_trait(sp, trait, params, *be.trait.m_trait_ptr, be.trait.m_path.m_path, b_params_mono, type,
- [&](const auto& i_ty, const auto& i_params, const auto& i_assoc) {
- auto cmp = this->compare_pp(sp, i_params, params);
- DEBUG("cmp=" << cmp << ", impl " << trait << i_params << " for " << i_ty << " -- desired " << trait << params);
- return cmp != ::HIR::Compare::Unequal && callback( ImplRef(i_ty.clone(), i_params.clone(), {}), cmp );
- });
- if( ret )
+ // NOTE: Callback called in closure's return statement
return true;
- }
+ }
+ return false;
+ });
+ if( rv )
+ return true;
}
)
@@ -2406,7 +2296,7 @@ bool TraitResolution::find_trait_impls_crate(const Span& sp,
// NOTE: Expected behavior is for Ivars to return false
// TODO: Should they return Compare::Fuzzy instead?
if( type.m_data.is_Infer() ) {
- return false;
+ return callback( ImplRef(&type, params_ptr, &null_assoc), ::HIR::Compare::Fuzzy );
}
const ::HIR::TraitMarkings* markings = nullptr;
@@ -3096,6 +2986,270 @@ bool TraitResolution::trait_contains_type(const Span& sp, const ::HIR::GenericPa
)
)
}
+// Checks if a type can unsize to another
+// - Returns Compare::Equal if the unsize is possible and fully known
+// - Returns Compare::Fuzzy if the unsize is possible, but still unknown.
+// - Returns Compare::Unequal if the unsize is impossible (for any reason)
+//
+// Closure is called if `get_new_type` is true, and the unsize is possible
+//
+// usecases:
+// - Checking for an impl as part of impl selection (return True/False/Maybe with required match for Maybe)
+// - Checking for an impl as part of typeck (return True/False/Maybe with unsize possibility OR required equality)
+::HIR::Compare TraitResolution::can_unsize(
+ const Span& sp, const ::HIR::TypeRef& dst_ty, const ::HIR::TypeRef& src_ty,
+ ::std::function<void(::HIR::TypeRef new_dst)>* new_type_callback,
+ ::std::function<void(const ::HIR::TypeRef& dst, const ::HIR::TypeRef& src)>* infer_callback
+ ) const
+{
+ TRACE_FUNCTION_F(dst_ty << " <- " << src_ty);
+ const auto& lang_Unsize = this->m_crate.get_lang_item_path(sp, "unsize");
+
+ // 1. Test for type equality
+ {
+ auto cmp = dst_ty.compare_with_placeholders(sp, src_ty, m_ivars.callback_resolve_infer());
+ if( cmp == ::HIR::Compare::Equal )
+ {
+ return ::HIR::Compare::Unequal;
+ }
+ }
+
+ // 2. If either side is an ivar, fuzzy.
+ if( dst_ty.m_data.is_Infer() || src_ty.m_data.is_Infer() )
+ {
+ // Inform the caller that these two types could unsize to each other
+ // - This allows the coercions code to move the coercion rule up
+ if( infer_callback )
+ {
+ (*infer_callback)(dst_ty, src_ty);
+ }
+ return ::HIR::Compare::Fuzzy;
+ }
+
+ {
+ bool found_bound = this->iterate_bounds([&](const auto& gb){
+ if(!gb.is_TraitBound())
+ return false;
+ const auto& be = gb.as_TraitBound();
+ if(be.trait.m_path.m_path != lang_Unsize)
+ return false;
+ const auto& be_dst = be.trait.m_path.m_params.m_types.at(0);
+
+ auto cmp = src_ty.compare_with_placeholders(sp, be.type, m_ivars.callback_resolve_infer());
+ if(cmp == ::HIR::Compare::Unequal) return false;
+
+ cmp &= dst_ty.compare_with_placeholders(sp, be_dst, m_ivars.callback_resolve_infer());
+ if(cmp == ::HIR::Compare::Unequal) return false;
+
+ if( cmp != ::HIR::Compare::Equal )
+ {
+ TODO(sp, "Found bound " << dst_ty << "=" << be_dst << " <- " << src_ty << "=" << be.type);
+ }
+ return true;
+ });
+ if( found_bound )
+ {
+ return ::HIR::Compare::Equal;
+ }
+ }
+
+ // Associated types, check the bounds in the trait.
+ if( src_ty.m_data.is_Path() && src_ty.m_data.as_Path().path.m_data.is_UfcsKnown() )
+ {
+ ::HIR::Compare rv = ::HIR::Compare::Equal;
+ const auto& pe = src_ty.m_data.as_Path().path.m_data.as_UfcsKnown();
+ auto monomorph_cb = monomorphise_type_get_cb(sp, &*pe.type, &pe.trait.m_params, nullptr, nullptr);
+ auto found_bound = this->iterate_aty_bounds(sp, pe, [&](const ::HIR::TraitPath& bound) {
+ if( bound.m_path.m_path != lang_Unsize )
+ return false;
+ const auto& be_dst_tpl = bound.m_path.m_params.m_types.at(0);
+ ::HIR::TypeRef tmp_ty;
+ const auto& be_dst = (monomorphise_type_needed(be_dst_tpl) ? tmp_ty = monomorphise_type_with(sp, be_dst_tpl, monomorph_cb) : be_dst_tpl);
+
+ auto cmp = dst_ty.compare_with_placeholders(sp, be_dst, m_ivars.callback_resolve_infer());
+ if(cmp == ::HIR::Compare::Unequal) return false;
+
+ if( cmp != ::HIR::Compare::Equal )
+ {
+ DEBUG("> Found bound (fuzzy) " << dst_ty << "=" << be_dst << " <- " << src_ty);
+ rv = ::HIR::Compare::Fuzzy;
+ }
+ return true;
+ });
+ if( found_bound )
+ {
+ return rv;
+ }
+ }
+
+ // Struct<..., T, ...>: Unsize<Struct<..., U, ...>>
+ if( dst_ty.m_data.is_Path() && src_ty.m_data.is_Path() )
+ {
+ bool dst_is_unsizable = dst_ty.m_data.as_Path().binding.is_Struct() && dst_ty.m_data.as_Path().binding.as_Struct()->m_markings.can_unsize;
+ bool src_is_unsizable = src_ty.m_data.as_Path().binding.is_Struct() && src_ty.m_data.as_Path().binding.as_Struct()->m_markings.can_unsize;
+ if( dst_is_unsizable || src_is_unsizable )
+ {
+ DEBUG("Struct unsize? " << dst_ty << " <- " << src_ty);
+ const auto& str = *dst_ty.m_data.as_Path().binding.as_Struct();
+ const auto& dst_gp = dst_ty.m_data.as_Path().path.m_data.as_Generic();
+ const auto& src_gp = src_ty.m_data.as_Path().path.m_data.as_Generic();
+
+ if( dst_gp == src_gp )
+ {
+ DEBUG("Can't Unsize, destination and source are identical");
+ return ::HIR::Compare::Unequal;
+ }
+ else if( dst_gp.m_path == src_gp.m_path )
+ {
+ DEBUG("Checking for Unsize " << dst_gp << " <- " << src_gp);
+ // Structures are equal, add the requirement that the ?Sized parameter also impl Unsize
+ const auto& dst_inner = m_ivars.get_type( dst_gp.m_params.m_types.at(str.m_markings.unsized_param) );
+ const auto& src_inner = m_ivars.get_type( src_gp.m_params.m_types.at(str.m_markings.unsized_param) );
+
+ auto cb = [&](auto d){
+ assert(new_type_callback);
+
+ // Re-create structure with s/d
+ auto dst_gp_new = dst_gp.clone();
+ dst_gp_new.m_params.m_types.at(str.m_markings.unsized_param) = mv$(d);
+ (*new_type_callback)( ::HIR::TypeRef::new_path(mv$(dst_gp_new), &str) );
+ };
+ if( new_type_callback )
+ {
+ ::std::function<void(::HIR::TypeRef)> cb_p = cb;
+ return this->can_unsize(sp, dst_inner, src_inner, &cb_p, infer_callback);
+ }
+ else
+ {
+ return this->can_unsize(sp, dst_inner, src_inner, nullptr, infer_callback);
+ }
+ }
+ else
+ {
+ DEBUG("Can't Unsize, destination and source are different structs");
+ return ::HIR::Compare::Unequal;
+ }
+ }
+ }
+
+ // (Trait) <- Foo
+ if( const auto* de = dst_ty.m_data.opt_TraitObject() )
+ {
+ // TODO: Check if src_ty is !Sized
+ // - Only allowed if the source is a trait object with the same data trait and lesser bounds
+
+ DEBUG("TraitObject unsize? " << dst_ty << " <- " << src_ty);
+
+ // (Trait) <- (Trait+Foo)
+ if( const auto* se = src_ty.m_data.opt_TraitObject() )
+ {
+ auto rv = ::HIR::Compare::Equal;
+ // 1. Data trait must be the same (TODO: Fuzzy)
+ if( de->m_trait != se->m_trait )
+ {
+ return ::HIR::Compare::Unequal;
+ }
+
+ // 2. Destination markers must be a strict subset
+ for(const auto& mt : de->m_markers)
+ {
+ // TODO: Fuzzy match
+ bool found = false;
+ for(const auto& omt : se->m_markers) {
+ if( omt == mt ) {
+ found = true;
+ break;
+ }
+ }
+ if( !found ) {
+ // Return early.
+ return ::HIR::Compare::Unequal;
+ }
+ }
+
+ if( rv == ::HIR::Compare::Fuzzy && new_type_callback )
+ {
+ // TODO: Inner type
+ }
+ return ::HIR::Compare::Equal;
+ }
+
+ bool good;
+ ::HIR::Compare total_cmp = ::HIR::Compare::Equal;
+
+ ::HIR::TypeRef::Data::Data_TraitObject tmp_e;
+ tmp_e.m_trait.m_path = de->m_trait.m_path.m_path;
+
+ // Check data trait first.
+ if( de->m_trait.m_path.m_path == ::HIR::SimplePath() ) {
+ ASSERT_BUG(sp, de->m_markers.size() > 0, "TraitObject with no traits - " << dst_ty);
+ good = true;
+ }
+ else {
+ good = find_trait_impls(sp, de->m_trait.m_path.m_path, de->m_trait.m_path.m_params, src_ty,
+ [&](const auto impl, auto cmp) {
+ if( cmp == ::HIR::Compare::Unequal )
+ return false;
+ total_cmp &= cmp;
+ tmp_e.m_trait.m_path.m_params = impl.get_trait_params();
+ for(const auto& aty : de->m_trait.m_type_bounds) {
+ auto atyv = impl.get_type(aty.first.c_str());
+ if( atyv == ::HIR::TypeRef() )
+ {
+ // Get the trait from which this associated type comes.
+ // Insert a UfcsKnown path for that
+ auto p = ::HIR::Path( src_ty.clone(), de->m_trait.m_path.clone(), aty.first );
+ // Run EAT
+ atyv = this->expand_associated_types( sp, ::HIR::TypeRef::new_path( mv$(p), {} ) );
+ }
+ tmp_e.m_trait.m_type_bounds[aty.first] = mv$(atyv);
+ }
+ return true;
+ });
+ }
+
+ // Then markers
+ auto cb = [&](const auto impl, auto cmp){
+ if( cmp == ::HIR::Compare::Unequal )
+ return false;
+ total_cmp &= cmp;
+ tmp_e.m_markers.back().m_params = impl.get_trait_params();
+ return true;
+ };
+ for(const auto& marker : de->m_markers)
+ {
+ if(!good) break;
+ tmp_e.m_markers.push_back( marker.m_path );
+ good &= find_trait_impls(sp, marker.m_path, marker.m_params, src_ty, cb);
+ }
+
+ if( good && total_cmp == ::HIR::Compare::Fuzzy && new_type_callback )
+ {
+ (*new_type_callback)( ::HIR::TypeRef(mv$(tmp_e)) );
+ }
+ return total_cmp;
+ }
+
+ // [T] <- [T; n]
+ if( const auto* de = dst_ty.m_data.opt_Slice() )
+ {
+ if( const auto* se = src_ty.m_data.opt_Array() )
+ {
+ DEBUG("Array unsize? " << *de->inner << " <- " << *se->inner);
+ auto cmp = de->inner->compare_with_placeholders(sp, *se->inner, m_ivars.callback_resolve_infer());
+ // TODO: Indicate to caller that for this to be true, these two must be the same.
+ // - I.E. if true, equate these types
+ if(cmp == ::HIR::Compare::Fuzzy && new_type_callback)
+ {
+ (*new_type_callback)( ::HIR::TypeRef::new_slice( se->inner->clone() ) );
+ }
+ return cmp;
+ }
+ }
+
+ DEBUG("Can't unsize, no rules matched");
+ return ::HIR::Compare::Unequal;
+}
const ::HIR::TypeRef* TraitResolution::type_is_owned_box(const Span& sp, const ::HIR::TypeRef& ty) const
{
TU_IFLET(::HIR::TypeRef::Data, ty.m_data, Path, e,
@@ -3118,7 +3272,7 @@ const ::HIR::TypeRef* TraitResolution::autoderef(const Span& sp, const ::HIR::Ty
const auto& ty = this->m_ivars.get_type(ty_in);
TU_IFLET(::HIR::TypeRef::Data, ty.m_data, Borrow, e,
DEBUG("Deref " << ty << " into " << *e.inner);
- return &*e.inner;
+ return &this->m_ivars.get_type(*e.inner);
)
// TODO: Just doing `*[1,2,3]` doesn't work, but this is needed to allow `[1,2,3].iter()` to work
else TU_IFLET(::HIR::TypeRef::Data, ty.m_data, Array, e,
diff --git a/src/hir_typeck/helpers.hpp b/src/hir_typeck/helpers.hpp
index a1db0af9..b7e6ca38 100644
--- a/src/hir_typeck/helpers.hpp
+++ b/src/hir_typeck/helpers.hpp
@@ -201,6 +201,7 @@ public:
/// Iterate over in-scope bounds (function then top)
bool iterate_bounds( ::std::function<bool(const ::HIR::GenericBound&)> cb) const;
+ bool iterate_aty_bounds(const Span& sp, const ::HIR::Path::Data::Data_UfcsKnown& pe, ::std::function<bool(const ::HIR::TraitPath&)> cb) const;
typedef ::std::function<bool(const ::HIR::TypeRef&, const ::HIR::PathParams&, const ::std::map< ::std::string,::HIR::TypeRef>&)> t_cb_trait_impl;
typedef ::std::function<bool(ImplRef, ::HIR::Compare)> t_cb_trait_impl_r;
@@ -265,6 +266,13 @@ public:
bool trait_contains_type(const Span& sp, const ::HIR::GenericPath& trait_path, const ::HIR::Trait& trait_ptr, const ::std::string& name, ::HIR::GenericPath& out_path) const;
::HIR::Compare type_is_copy(const Span& sp, const ::HIR::TypeRef& ty) const;
+ // If `new_type_callback` is populated, it will be called with the actual/possible dst_type
+ // If `infer_callback` is populated, it will be called when either side is an ivar
+ ::HIR::Compare can_unsize(const Span& sp, const ::HIR::TypeRef& dst_ty, const ::HIR::TypeRef& src_ty, ::std::function<void(::HIR::TypeRef new_dst)> new_type_callback) const {
+ return can_unsize(sp, dst_ty, src_ty, &new_type_callback);
+ }
+ ::HIR::Compare can_unsize(const Span& sp, const ::HIR::TypeRef& dst_ty, const ::HIR::TypeRef& src_ty, ::std::function<void(::HIR::TypeRef new_dst)>* new_type_callback, ::std::function<void(const ::HIR::TypeRef& dst, const ::HIR::TypeRef& src)>* infer_callback=nullptr) const;
+
const ::HIR::TypeRef* type_is_owned_box(const Span& sp, const ::HIR::TypeRef& ty) const;
private:
diff --git a/src/hir_typeck/static.cpp b/src/hir_typeck/static.cpp
index 3bae5ba8..8baa2557 100644
--- a/src/hir_typeck/static.cpp
+++ b/src/hir_typeck/static.cpp
@@ -237,12 +237,11 @@ bool StaticTraitResolve::find_impl(
auto monomorph_cb = monomorphise_type_get_cb(sp, &*pe.type, &pe.trait.m_params, nullptr, nullptr);
- for(const auto& bound : aty_def.m_trait_bounds)
- {
+ auto check_bound = [&](const ::HIR::TraitPath& bound) {
const auto& b_params = bound.m_path.m_params;
::HIR::PathParams params_mono_o;
const auto& b_params_mono = (monomorphise_pathparams_needed(b_params) ? params_mono_o = monomorphise_path_params_with(sp, b_params, monomorph_cb, false) : b_params);
- DEBUG(": " << bound.m_path.m_path << b_params_mono);
+ DEBUG("[find_impl] : " << bound.m_path.m_path << b_params_mono);
if( bound.m_path.m_path == trait_path )
{
@@ -279,9 +278,40 @@ bool StaticTraitResolve::find_impl(
DEBUG("impl " << trait_path << i_params << " for " << type << " -- desired " << trait_path << *trait_params);
return found_cb( ImplRef(type.clone(), i_params.clone(), {}), false );
});
- if( ret )
+ return ret;
+ };
+
+ for(const auto& bound : aty_def.m_trait_bounds)
+ {
+ if( check_bound(bound) )
+ return true;
+ }
+
+ // Check `where` clauses on the trait too
+ for(const auto& bound : trait_ref.m_params.m_bounds)
+ {
+ if( !bound.is_TraitBound() ) continue;
+ const auto& be = bound.as_TraitBound();
+
+ DEBUG("be.type = " << be.type);
+ if( !be.type.m_data.is_Path() )
+ continue;
+ if( !be.type.m_data.as_Path().path.m_data.is_UfcsKnown() )
+ continue ;
+ {
+ const auto& pe2 = be.type.m_data.as_Path().path.m_data.as_UfcsKnown();
+ if( *pe2.type != ::HIR::TypeRef("Self",GENERIC_Self) )
+ continue ;
+ if( pe2.trait.m_path != pe.trait.m_path )
+ continue ;
+ if( pe2.item != pe.item )
+ continue ;
+ }
+
+ if( check_bound(be.trait) )
return true;
}
+
DEBUG("- No bounds matched");
}
)
@@ -564,7 +594,7 @@ bool StaticTraitResolve::find_impl__check_crate_raw(
{
const auto& e = *ep;
- DEBUG("[find_impl] Trait bound " << e.type << " : " << e.trait);
+ DEBUG("Trait bound " << e.type << " : " << e.trait);
auto b_ty_mono = monomorphise_type_with(sp, e.type, cb_monomorph);
this->expand_associated_types(sp, b_ty_mono);
auto b_tp_mono = monomorphise_traitpath_with(sp, e.trait, cb_monomorph, false);
@@ -575,14 +605,15 @@ bool StaticTraitResolve::find_impl__check_crate_raw(
// TODO: These should be tagged with the source trait and that source trait used for expansion.
this->expand_associated_types(sp, assoc_bound.second);
}
- DEBUG("[find_impl] - b_ty_mono = " << b_ty_mono << ", b_tp_mono = " << b_tp_mono);
+ DEBUG("- b_ty_mono = " << b_ty_mono << ", b_tp_mono = " << b_tp_mono);
// HACK: If the type is '_', assume the bound passes
if( b_ty_mono.m_data.is_Infer() ) {
continue ;
}
// TODO: This is extrememly inefficient (looks up the trait impl 1+N times)
- if( b_tp_mono.m_type_bounds.size() > 0 ) {
+ if( b_tp_mono.m_type_bounds.size() > 0 )
+ {
//
for(const auto& assoc_bound : b_tp_mono.m_type_bounds) {
const auto& aty_name = assoc_bound.first;
@@ -603,7 +634,7 @@ bool StaticTraitResolve::find_impl__check_crate_raw(
//auto cmp = have .match_test_generics_fuzz(sp, exp, cb_ident, cb_match);
auto cmp = exp .match_test_generics_fuzz(sp, have, cb_ident, cb_match);
- ASSERT_BUG(sp, cmp == ::HIR::Compare::Equal, "Assoc ty " << aty_name << " mismatch, " << have << " != des " << exp);
+ ASSERT_BUG(sp, cmp != ::HIR::Compare::Unequal, "Assoc ty " << aty_name << " mismatch, " << have << " != des " << exp);
return true;
});
}
@@ -613,10 +644,13 @@ bool StaticTraitResolve::find_impl__check_crate_raw(
}
}
}
- else
+
+ // TODO: Detect if the associated type bound above is directly from the bounded trait, and skip this if that's the case
+ //else
{
bool rv = false;
if( b_ty_mono.m_data.is_Generic() && (b_ty_mono.m_data.as_Generic().binding >> 8) == 2 ) {
+ DEBUG("- Placeholder param " << b_ty_mono << ", magic success");
rv = true;
}
else {
@@ -1401,6 +1435,151 @@ bool StaticTraitResolve::type_is_sized(const Span& sp, const ::HIR::TypeRef& ty)
throw "";
}
+bool StaticTraitResolve::type_needs_drop_glue(const Span& sp, const ::HIR::TypeRef& ty) const
+{
+ // If `T: Copy`, then it can't need drop glue
+ if( type_is_copy(sp, ty) )
+ return false;
+
+ TU_MATCH(::HIR::TypeRef::Data, (ty.m_data), (e),
+ (Generic,
+ return true;
+ ),
+ (Path,
+ if( e.binding.is_Opaque() )
+ return true;
+
+ auto pp = ::HIR::PathParams();
+ bool has_direct_drop = this->find_impl(sp, m_lang_Drop, &pp, ty, [&](auto , bool){ return true; }, true);
+ if( has_direct_drop )
+ return true;
+
+ ::HIR::TypeRef tmp_ty;
+ const auto& pe = e.path.m_data.as_Generic();
+ auto monomorph_cb = monomorphise_type_get_cb(sp, nullptr, &pe.m_params, nullptr, nullptr);
+ auto monomorph = [&](const auto& tpl)->const ::HIR::TypeRef& {
+ if( monomorphise_type_needed(tpl) ) {
+ tmp_ty = monomorphise_type_with(sp, tpl, monomorph_cb, false);
+ this->expand_associated_types(sp, tmp_ty);
+ return tmp_ty;
+ }
+ else {
+ return tpl;
+ }
+ };
+ TU_MATCHA( (e.binding), (pbe),
+ (Unbound,
+ BUG(sp, "Unbound path");
+ ),
+ (Opaque,
+ // Technically a bug, checked above
+ return true;
+ ),
+ (Struct,
+ TU_MATCHA( (pbe->m_data), (se),
+ (Unit,
+ ),
+ (Tuple,
+ for(const auto& e : se)
+ {
+ if( type_needs_drop_glue(sp, monomorph(e.ent)) )
+ return true;
+ }
+ ),
+ (Named,
+ for(const auto& e : se)
+ {
+ if( type_needs_drop_glue(sp, monomorph(e.second.ent)) )
+ return true;
+ }
+ )
+ )
+ return false;
+ ),
+ (Enum,
+ for(const auto& e : pbe->m_variants)
+ {
+ TU_MATCHA( (e.second), (ve),
+ (Unit,
+ ),
+ (Value,
+ ),
+ (Tuple,
+ for(const auto& e : ve)
+ {
+ if( type_needs_drop_glue(sp, monomorph(e.ent)) )
+ return true;
+ }
+ ),
+ (Struct,
+ for(const auto& e : ve)
+ {
+ if( type_needs_drop_glue(sp, monomorph(e.second.ent)) )
+ return true;
+ }
+ )
+ )
+ }
+ return false;
+ ),
+ (Union,
+ // Unions don't have drop glue unless they impl Drop
+ return false;
+ )
+ )
+ ),
+ (Diverge,
+ return false;
+ ),
+ (Closure,
+ // TODO: Destructure?
+ return true;
+ ),
+ (Infer,
+ BUG(sp, "type_needs_drop_glue on _");
+ return false;
+ ),
+ (Borrow,
+ // &-ptrs don't have drop glue
+ if( e.type != ::HIR::BorrowType::Owned )
+ return false;
+ return type_needs_drop_glue(sp, *e.inner);
+ ),
+ (Pointer,
+ return false;
+ ),
+ (Function,
+ return false;
+ ),
+ (Primitive,
+ return false;
+ ),
+ (Array,
+ return type_needs_drop_glue(sp, *e.inner);
+ ),
+ (Slice,
+ return type_needs_drop_glue(sp, *e.inner);
+ ),
+ (TraitObject,
+ return true;
+ ),
+ (ErasedType,
+ // Is this an error?
+ return true;
+ ),
+ (Tuple,
+ for(const auto& ty : e)
+ {
+ if( !type_needs_drop_glue(sp, ty) )
+ return true;
+ }
+ return false;
+ )
+ )
+ assert(!"Fell off the end of type_needs_drop_glue");
+ throw "";
+}
+
const ::HIR::TypeRef* StaticTraitResolve::is_type_owned_box(const ::HIR::TypeRef& ty) const
{
if( ! ty.m_data.is_Path() ) {
@@ -1482,7 +1661,44 @@ StaticTraitResolve::ValuePtr StaticTraitResolve::get_value(const Span& sp, const
}
else
{
- TODO(sp, "Search for trait impl");
+ ImplRef best_impl;
+ this->find_impl(sp, pe.trait.m_path, &pe.trait.m_params, *pe.type, [&](auto impl, bool is_fuzz)->bool{
+ if( ! impl.m_data.is_TraitImpl() )
+ return false;
+ const auto& ti = *impl.m_data.as_TraitImpl().impl;
+ auto it = ti.m_constants.find(pe.item);
+ if(it == ti.m_constants.end())
+ return false;
+
+ if( impl.more_specific_than(best_impl) )
+ {
+ best_impl = mv$(impl);
+ // If this value is specialisable, keep searching (return false)
+ return !it->second.is_specialisable;
+ }
+ // Keep searching
+ return false;
+ });
+ if( !best_impl.is_valid() )
+ {
+ TODO(sp, "What should be done when an impl can't be found? " << p);
+ }
+
+ if( ! best_impl.m_data.is_TraitImpl() )
+ TODO(sp, "Use bounded constant values for " << p);
+ auto& ie = best_impl.m_data.as_TraitImpl();
+ out_params.pp_impl = &out_params.pp_impl_data;
+ for(auto ptr : ie.params)
+ {
+ // TODO: Avoid cloning when the params are in the placeholder array
+ out_params.pp_impl_data.m_types.push_back( ptr->clone() );
+ }
+
+ const auto& ti = *ie.impl;
+ const auto& c = ti.m_constants.at(pe.item);
+
+ // TODO: What if the type requires monomorphisation? Leave it up to the caller
+ return &c.data;
}
throw "";
),
@@ -1494,6 +1710,7 @@ StaticTraitResolve::ValuePtr StaticTraitResolve::get_value(const Span& sp, const
m_crate.find_type_impls(*pe.type, [](const auto&x)->const ::HIR::TypeRef& { return x; }, [&](const auto& impl) {
DEBUG("Found impl" << impl.m_params.fmt_args() << " " << impl.m_type);
// TODO: Populate pp_impl
+ // TODO: Specialisation
{
auto fit = impl.m_methods.find(pe.item);
if( fit != impl.m_methods.end() )
diff --git a/src/hir_typeck/static.hpp b/src/hir_typeck/static.hpp
index cde9797c..16302218 100644
--- a/src/hir_typeck/static.hpp
+++ b/src/hir_typeck/static.hpp
@@ -178,6 +178,9 @@ public:
bool type_is_copy(const Span& sp, const ::HIR::TypeRef& ty) const;
bool type_is_sized(const Span& sp, const ::HIR::TypeRef& ty) const;
+ /// Returns `true` if the passed type either implements Drop, or contains a type that implements Drop
+ bool type_needs_drop_glue(const Span& sp, const ::HIR::TypeRef& ty) const;
+
const ::HIR::TypeRef* is_type_owned_box(const ::HIR::TypeRef& ty) const;
const ::HIR::TypeRef* is_type_phantom_data(const ::HIR::TypeRef& ty) const;
diff --git a/src/include/main_bindings.hpp b/src/include/main_bindings.hpp
index c01ff86d..184f266f 100644
--- a/src/include/main_bindings.hpp
+++ b/src/include/main_bindings.hpp
@@ -16,6 +16,7 @@ extern AST::Crate Parse_Crate(::std::string mainfile);
extern void Expand(::AST::Crate& crate);
+extern void Expand_TestHarness(::AST::Crate& crate);
/// Process #[] decorators
extern void Process_Decorators(AST::Crate& crate);
diff --git a/src/main.cpp b/src/main.cpp
index 4c8d3104..8616ff2f 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -68,6 +68,7 @@ void init_debug_list()
g_debug_disable_map.insert( "MIR Cleanup" );
g_debug_disable_map.insert( "MIR Optimise" );
g_debug_disable_map.insert( "MIR Validate PO" );
+ g_debug_disable_map.insert( "MIR Validate Full" );
g_debug_disable_map.insert( "HIR Serialise" );
g_debug_disable_map.insert( "Trans Enumerate" );
@@ -131,6 +132,11 @@ struct ProgramParams
::AST::Crate::Type crate_type = ::AST::Crate::Type::Unknown;
::std::string crate_name;
+ unsigned opt_level = 0;
+ bool emit_debug_info = false;
+
+ bool test_harness = false;
+
::std::vector<const char*> lib_search_dirs;
::std::vector<const char*> libraries;
@@ -167,10 +173,12 @@ int main(int argc, char *argv[])
ProgramParams params(argc, argv);
// Set up cfg values
+ Cfg_SetValue("rust_compiler", "mrustc");
// TODO: Target spec
Cfg_SetFlag("unix");
Cfg_SetFlag("linux");
Cfg_SetValue("target_os", "linux");
+ Cfg_SetValue("target_family", "unix");
Cfg_SetValue("target_pointer_width", "64");
Cfg_SetValue("target_endian", "little");
Cfg_SetValue("target_arch", "x86_64");
@@ -188,6 +196,11 @@ int main(int argc, char *argv[])
});
+ if( params.test_harness )
+ {
+ Cfg_SetFlag("test");
+ }
+
try
{
@@ -195,6 +208,7 @@ int main(int argc, char *argv[])
AST::Crate crate = CompilePhase<AST::Crate>("Parse", [&]() {
return Parse_Crate(params.infile);
});
+ crate.m_test_harness = params.test_harness;
if( params.last_stage == ProgramParams::STAGE_PARSE ) {
return 0;
@@ -210,6 +224,11 @@ int main(int argc, char *argv[])
Expand(crate);
});
+ if( params.test_harness )
+ {
+ Expand_TestHarness(crate);
+ }
+
// Extract the crate type and name from the crate attributes
auto crate_type = params.crate_type;
if( crate_type == ::AST::Crate::Type::Unknown ) {
@@ -254,6 +273,10 @@ int main(int argc, char *argv[])
}
}
crate.m_crate_name = crate_name;
+ if( params.test_harness )
+ {
+ crate.m_crate_name += "$test";
+ }
if( params.outfile == "" ) {
switch( crate.m_crate_type )
@@ -281,7 +304,7 @@ int main(int argc, char *argv[])
}
// Allocator and panic strategies
- if( crate.m_crate_type == ::AST::Crate::Type::Executable )
+ if( crate.m_crate_type == ::AST::Crate::Type::Executable || params.test_harness )
{
// TODO: Detect if an allocator crate is already present.
crate.load_extern_crate(Span(), "alloc_system");
@@ -290,6 +313,10 @@ int main(int argc, char *argv[])
// - `mrustc-main` lang item default
crate.m_lang_items.insert(::std::make_pair( ::std::string("mrustc-main"), ::AST::Path("", {AST::PathNode("main")}) ));
}
+ if( params.test_harness )
+ {
+ crate.load_extern_crate(Span(), "test");
+ }
// Resolve names to be absolute names (include references to the relevant struct/global/function)
// - This does name checking on types and free functions.
@@ -421,6 +448,13 @@ int main(int argc, char *argv[])
CompilePhaseV("MIR Cleanup", [&]() {
MIR_CleanupCrate(*hir_crate);
});
+ if( getenv("MRUSTC_FULL_VALIDATE_PREOPT") )
+ {
+ CompilePhaseV("MIR Validate Full", [&]() {
+ MIR_CheckCrate_Full(*hir_crate);
+ });
+ }
+
// Optimise the MIR
CompilePhaseV("MIR Optimise", [&]() {
MIR_OptimiseCrate(*hir_crate);
@@ -433,6 +467,12 @@ int main(int argc, char *argv[])
CompilePhaseV("MIR Validate PO", [&]() {
MIR_CheckCrate(*hir_crate);
});
+ // - Exhaustive MIR validation (follows every code path and checks variable validity)
+ // > DEBUGGING ONLY
+ CompilePhaseV("MIR Validate Full", [&]() {
+ if( getenv("MRUSTC_FULL_VALIDATE") )
+ MIR_CheckCrate_Full(*hir_crate);
+ });
if( params.last_stage == ProgramParams::STAGE_MIR ) {
return 0;
@@ -449,8 +489,14 @@ int main(int argc, char *argv[])
for(const char* libdir : params.libraries ) {
trans_opt.libraries.push_back( libdir );
}
+ trans_opt.emit_debug_info = params.emit_debug_info;
// Generate code for non-generic public items (if requested)
+ if( params.test_harness )
+ {
+ // If the test harness is enabled, override crate type to "Executable"
+ crate_type = ::AST::Crate::Type::Executable;
+ }
switch( crate_type )
{
case ::AST::Crate::Type::Unknown:
@@ -580,6 +626,12 @@ ProgramParams::ProgramParams(int argc, char *argv[])
}
this->outfile = argv[++i];
break;
+ case 'O':
+ this->opt_level = 2;
+ break;
+ case 'g':
+ this->emit_debug_info = true;
+ break;
default:
exit(1);
}
@@ -672,6 +724,9 @@ ProgramParams::ProgramParams(int argc, char *argv[])
exit(1);
}
}
+ else if( strcmp(arg, "--test") == 0 ) {
+ this->test_harness = true;
+ }
else {
::std::cerr << "Unknown option '" << arg << "'" << ::std::endl;
exit(1);
@@ -680,3 +735,54 @@ ProgramParams::ProgramParams(int argc, char *argv[])
}
}
+
+::std::ostream& operator<<(::std::ostream& os, const FmtEscaped& x)
+{
+ os << ::std::hex;
+ for(auto s = x.s; *s != '\0'; s ++)
+ {
+ switch(*s)
+ {
+ case '\0': os << "\\0"; break;
+ case '\n': os << "\\n"; break;
+ case '\\': os << "\\\\"; break;
+ case '"': os << "\\\""; break;
+ default:
+ uint8_t v = *s;
+ if( v < 0x80 )
+ {
+ if( v < ' ' || v > 0x7F )
+ os << "\\u{" << ::std::hex << (unsigned int)v << "}";
+ else
+ os << v;
+ }
+ else if( v < 0xC0 )
+ ;
+ else if( v < 0xE0 )
+ {
+ uint32_t val = (uint32_t)(v & 0x1F) << 6;
+ v = (uint8_t)*++s; if( (v & 0xC0) != 0x80 ) { s--; continue ; } val |= (uint32_t)v << 6;
+ os << "\\u{" << ::std::hex << val << "}";
+ }
+ else if( v < 0xF0 )
+ {
+ uint32_t val = (uint32_t)(v & 0x0F) << 12;
+ v = (uint8_t)*++s; if( (v & 0xC0) != 0x80 ) { s--; continue ; } val |= (uint32_t)v << 12;
+ v = (uint8_t)*++s; if( (v & 0xC0) != 0x80 ) { s--; continue ; } val |= (uint32_t)v << 6;
+ os << "\\u{" << ::std::hex << val << "}";
+ }
+ else if( v < 0xF8 )
+ {
+ uint32_t val = (uint32_t)(v & 0x07) << 18;
+ v = (uint8_t)*++s; if( (v & 0xC0) != 0x80 ) { s--; continue ; } val |= (uint32_t)v << 18;
+ v = (uint8_t)*++s; if( (v & 0xC0) != 0x80 ) { s--; continue ; } val |= (uint32_t)v << 12;
+ v = (uint8_t)*++s; if( (v & 0xC0) != 0x80 ) { s--; continue ; } val |= (uint32_t)v << 6;
+ os << "\\u{" << ::std::hex << val << "}";
+ }
+ break;
+ }
+ }
+ os << ::std::dec;
+ return os;
+}
+
diff --git a/src/mir/check.cpp b/src/mir/check.cpp
index 807f08aa..4b9dfd8b 100644
--- a/src/mir/check.cpp
+++ b/src/mir/check.cpp
@@ -84,53 +84,6 @@ namespace {
}
}
-namespace {
- template<typename T>
- struct RunIterable {
- const ::std::vector<T>& list;
- unsigned int ofs;
- ::std::pair<size_t,size_t> cur;
- RunIterable(const ::std::vector<T>& list):
- list(list), ofs(0)
- {
- advance();
- }
- void advance() {
- if( ofs < list.size() )
- {
- auto start = ofs;
- while(ofs < list.size() && list[ofs] == list[start])
- ofs ++;
- cur = ::std::make_pair(start, ofs-1);
- }
- else
- {
- ofs = list.size()+1;
- }
- }
- RunIterable<T> begin() { return *this; }
- RunIterable<T> end() { auto rv = *this; rv.ofs = list.size()+1; return rv; }
- bool operator==(const RunIterable<T>& x) {
- return x.ofs == ofs;
- }
- bool operator!=(const RunIterable<T>& x) {
- return !(*this == x);
- }
- void operator++() {
- advance();
- }
- const ::std::pair<size_t,size_t>& operator*() const {
- return this->cur;
- }
- const ::std::pair<size_t,size_t>* operator->() const {
- return &this->cur;
- }
- };
- template<typename T>
- RunIterable<T> runs(const ::std::vector<T>& x) {
- return RunIterable<T>(x);
- }
-}
//template<typename T>
//::std::ostream& operator<<(::std::ostream& os, const T& v) {
// v.fmt(os);
@@ -209,9 +162,9 @@ void MIR_Validate_ValState(::MIR::TypeResolve& state, const ::MIR::Function& fcn
return arguments.empty() && temporaries.empty() && variables.empty();
}
- bool merge(ValStates& other)
+ bool merge(unsigned bb_idx, ValStates& other)
{
- DEBUG("this=" << FMT_CB(ss,this->fmt(ss);) << ", other=" << FMT_CB(ss,other.fmt(ss);));
+ DEBUG("bb" << bb_idx << " this=" << FMT_CB(ss,this->fmt(ss);) << ", other=" << FMT_CB(ss,other.fmt(ss);));
if( this->empty() )
{
*this = other;
@@ -242,14 +195,17 @@ void MIR_Validate_ValState(::MIR::TypeResolve& state, const ::MIR::Function& fcn
),
(Argument,
MIR_ASSERT(state, e.idx < this->arguments.size(), "");
+ DEBUG("arg" << e.idx << " = " << (is_valid ? "Valid" : "Invalid"));
this->arguments[e.idx] = is_valid ? State::Valid : State::Invalid;
),
(Variable,
MIR_ASSERT(state, e < this->variables.size(), "");
+ DEBUG("var" << e << " = " << (is_valid ? "Valid" : "Invalid"));
this->variables[e] = is_valid ? State::Valid : State::Invalid;
),
(Temporary,
MIR_ASSERT(state, e.idx < this->temporaries.size(), "");
+ DEBUG("tmp" << e.idx << " = " << (is_valid ? "Valid" : "Invalid"));
this->temporaries[e.idx] = is_valid ? State::Valid : State::Invalid;
)
)
@@ -364,7 +320,7 @@ void MIR_Validate_ValState(::MIR::TypeResolve& state, const ::MIR::Function& fcn
// 1. Apply current state to `block_start_states` (merging if needed)
// - If no change happened, skip.
- if( ! block_start_states.at(block).merge( val_state ) ) {
+ if( ! block_start_states.at(block).merge(block, val_state) ) {
continue ;
}
DEBUG("BB" << block << " via [" << path << "]");
@@ -376,6 +332,7 @@ void MIR_Validate_ValState(::MIR::TypeResolve& state, const ::MIR::Function& fcn
const auto& stmt = bb.statements[stmt_idx];
state.set_cur_stmt(block, stmt_idx);
+ DEBUG(state << stmt);
switch( stmt.tag() )
{
case ::MIR::Statement::TAGDEAD:
@@ -453,11 +410,18 @@ void MIR_Validate_ValState(::MIR::TypeResolve& state, const ::MIR::Function& fcn
// Mark destination as valid
val_state.mark_validity( state, stmt.as_Assign().dst, true );
break;
+ case ::MIR::Statement::TAG_ScopeEnd:
+ //for(auto idx : stmt.as_ScopeEnd().vars)
+ // val_state.mark_validity(state, ::MIR::LValue::make_Variable(idx), false);
+ //for(auto idx : stmt.as_ScopeEnd().tmps)
+ // val_state.mark_validity(state, ::MIR::LValue::make_Temporary({idx}), false);
+ break;
}
}
// 3. Pass new state on to destination blocks
state.set_cur_stmt_term(block);
+ DEBUG(state << bb.terminator);
TU_MATCH(::MIR::Terminator, (bb.terminator), (e),
(Incomplete,
// Should be impossible here.
@@ -850,6 +814,9 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
case ::MIR::Statement::TAG_Drop:
// TODO: Anything need checking here?
break;
+ case ::MIR::Statement::TAG_ScopeEnd:
+ // TODO: Mark listed values as descoped
+ break;
}
}
diff --git a/src/mir/check_full.cpp b/src/mir/check_full.cpp
new file mode 100644
index 00000000..52c0aaf5
--- /dev/null
+++ b/src/mir/check_full.cpp
@@ -0,0 +1,981 @@
+/*
+ * MRustC - Rust Compiler
+ * - By John Hodge (Mutabah/thePowersGang)
+ *
+ * mir/check_full.cpp
+ * - Full MIR correctness checks (expensive value state checks)
+ */
+#include "main_bindings.hpp"
+#include "mir.hpp"
+#include <hir/visitor.hpp>
+#include <hir_typeck/static.hpp>
+#include <mir/helpers.hpp>
+#include <mir/visit_crate_mir.hpp>
+
+// DISABLED: Unsizing intentionally leaks
+#define ENABLE_LEAK_DETECTOR 0
+
+namespace
+{
+ struct State
+ {
+ // 0 = invalid
+ // -1 = valid
+ // other = 1-based index into `inner_states`
+ unsigned int index;
+
+ State(): index(0) {}
+ State(bool valid): index(valid ? ~0u : 0) {}
+ State(size_t idx):
+ index(idx+1)
+ {
+ }
+
+ bool is_composite() const {
+ return index != 0 && index != ~0u;
+ }
+ bool is_valid() const {
+ return index != 0;
+ }
+
+ bool operator==(const State& x) const {
+ return index == x.index;
+ }
+ bool operator!=(const State& x) const {
+ return !(*this == x);
+ }
+ };
+
+ struct ValueStates;
+}
+
+struct StateFmt {
+ const ValueStates& vss;
+ State s;
+ StateFmt( const ValueStates& vss, State s ):
+ vss(vss), s(s)
+ {}
+};
+
+namespace
+{
+ struct ValueStates
+ {
+ ::std::vector<State> vars;
+ ::std::vector<State> temporaries;
+ ::std::vector<State> arguments;
+ State return_value;
+ ::std::vector<bool> drop_flags;
+
+ ::std::vector< ::std::vector<State> > inner_states;
+
+ ::std::vector<unsigned int> bb_path;
+
+ ValueStates clone() const
+ {
+ return *this;
+ }
+ bool is_equivalent_to(const ValueStates& x) const
+ {
+ struct H {
+ static bool equal(const ValueStates& vss_a, const State& a, const ValueStates& vss_b, const State& b)
+ {
+ if( a.index == 0 )
+ {
+ return b.index == 0;
+ }
+ if( a.index == ~0u )
+ {
+ return b.index == ~0u;
+ }
+ if( b.index == 0 || b.index == ~0u )
+ {
+ return false;
+ }
+
+ const auto& states_a = vss_a.inner_states.at( a.index - 1 );
+ const auto& states_b = vss_b.inner_states.at( b.index - 1 );
+ // NOTE: If there's two different variants, this can happen.
+ if( states_a.size() != states_b.size() )
+ return false;
+
+ for(size_t i = 0; i < states_a.size(); i ++)
+ {
+ if( ! H::equal(vss_a, states_a[i], vss_b, states_b[i]) )
+ return false;
+ }
+ // If the above loop didn't early exit, the two states are equal
+ return true;
+ }
+ };
+
+ if( this->drop_flags != x.drop_flags )
+ return false;
+ if( ! H::equal(*this, return_value, x, x.return_value) )
+ return false;
+ assert(vars.size() == x.vars.size());
+ for(size_t i = 0; i < vars.size(); i ++)
+ {
+ if( ! H::equal(*this, vars[i], x, x.vars[i]) )
+ return false;
+ }
+ assert(temporaries.size() == x.temporaries.size());
+ for(size_t i = 0; i < temporaries.size(); i ++)
+ {
+ if( ! H::equal(*this, temporaries[i], x, x.temporaries[i]) )
+ return false;
+ }
+ assert(arguments.size() == x.arguments.size());
+ for(size_t i = 0; i < arguments.size(); i ++)
+ {
+ if( ! H::equal(*this, arguments[i], x, x.arguments[i]) )
+ return false;
+ }
+ return true;
+ }
+
+ StateFmt fmt_state(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& lv) const {
+ return StateFmt(*this, get_lvalue_state(mir_res, lv));
+ }
+
+ void ensure_param_valid(const ::MIR::TypeResolve& mir_res, const ::MIR::Param& lv) const
+ {
+ if(const auto* e = lv.opt_LValue())
+ {
+ this->ensure_lvalue_valid(mir_res, *e);
+ }
+ }
+ void ensure_lvalue_valid(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& lv) const
+ {
+ auto vs = get_lvalue_state(mir_res, lv);
+ ::std::vector<unsigned int> path;
+ ensure_valid(mir_res, lv, vs, path);
+ }
+ private:
+ struct InvalidReason {
+ enum {
+ Unwritten,
+ Moved,
+ Invalidated,
+ } ty;
+ size_t bb;
+ size_t stmt;
+
+ void fmt(::std::ostream& os) const {
+ switch(this->ty)
+ {
+ case Unwritten: os << "Not Written"; break;
+ case Moved: os << "Moved at BB" << bb << "/" << stmt; break;
+ case Invalidated: os << "Invalidated at BB" << bb << "/" << stmt; break;
+ }
+ }
+ };
+ InvalidReason find_invalid_reason(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& root_lv) const
+ {
+ using ::MIR::visit::ValUsage;
+ using ::MIR::visit::visit_mir_lvalues;
+
+ ::HIR::TypeRef tmp;
+ bool is_copy = mir_res.m_resolve.type_is_copy( mir_res.sp, mir_res.get_lvalue_type(tmp, root_lv) );
+ size_t cur_stmt = mir_res.get_cur_stmt_ofs();
+
+ // Dump all statements
+ if(true)
+ {
+ for(size_t i = 0; i < this->bb_path.size()-1; i++)
+ {
+ size_t bb_idx = this->bb_path[i];
+ const auto& bb = mir_res.m_fcn.blocks.at(bb_idx);
+
+ for(size_t stmt_idx = 0; stmt_idx < bb.statements.size(); stmt_idx++)
+ {
+ DEBUG("BB" << bb_idx << "/" << stmt_idx << " - " << bb.statements[stmt_idx]);
+ }
+ DEBUG("BB" << bb_idx << "/TERM - " << bb.terminator);
+ }
+
+ {
+ size_t bb_idx = this->bb_path.back();
+ const auto& bb = mir_res.m_fcn.blocks.at(bb_idx);
+ for(size_t stmt_idx = 0; stmt_idx < cur_stmt; stmt_idx ++)
+ {
+ DEBUG("BB" << bb_idx << "/" << stmt_idx << " - " << bb.statements[stmt_idx]);
+ }
+ }
+ }
+
+ if( !is_copy )
+ {
+ // Walk backwards through the BBs and find where it's used by value
+ assert(this->bb_path.size() > 0);
+ size_t bb_idx;
+ size_t stmt_idx;
+
+ bool was_moved = false;
+ size_t moved_bb, moved_stmt;
+ auto visit_cb = [&](const auto& lv, auto vu) {
+ if(lv == root_lv && vu == ValUsage::Move) {
+ was_moved = true;
+ moved_bb = bb_idx;
+ moved_stmt = stmt_idx;
+ return false;
+ }
+ return false;
+ };
+ // Most recent block (incomplete)
+ {
+ bb_idx = this->bb_path.back();
+ const auto& bb = mir_res.m_fcn.blocks.at(bb_idx);
+ for(stmt_idx = cur_stmt; stmt_idx -- && !was_moved; )
+ {
+ visit_mir_lvalues(bb.statements[stmt_idx], visit_cb);
+ }
+ }
+ for(size_t i = this->bb_path.size()-1; i -- && !was_moved; )
+ {
+ bb_idx = this->bb_path[i];
+ const auto& bb = mir_res.m_fcn.blocks.at(bb_idx);
+ stmt_idx = bb.statements.size();
+
+ visit_mir_lvalues(bb.terminator, visit_cb);
+
+ for(stmt_idx = bb.statements.size(); stmt_idx -- && !was_moved; )
+ {
+ visit_mir_lvalues(bb.statements[stmt_idx], visit_cb);
+ }
+ }
+
+ if( was_moved )
+ {
+ // Reason found, the value was moved
+ DEBUG("- Moved in BB" << moved_bb << "/" << moved_stmt);
+ return InvalidReason { InvalidReason::Moved, moved_bb, moved_stmt };
+ }
+ }
+ else
+ {
+ // Walk backwards to find assignment (if none, it's never initialized)
+ assert(this->bb_path.size() > 0);
+ size_t bb_idx;
+ size_t stmt_idx;
+
+ bool assigned = false;
+ auto visit_cb = [&](const auto& lv, auto vu) {
+ if(lv == root_lv && vu == ValUsage::Write) {
+ assigned = true;
+ //assigned_bb = this->bb_path[i];
+ //assigned_stmt = j;
+ return true;
+ }
+ return false;
+ };
+
+ // Most recent block (incomplete)
+ {
+ bb_idx = this->bb_path.back();
+ const auto& bb = mir_res.m_fcn.blocks.at(bb_idx);
+ for(stmt_idx = cur_stmt; stmt_idx -- && !assigned; )
+ {
+ visit_mir_lvalues(bb.statements[stmt_idx], visit_cb);
+ }
+ }
+ for(size_t i = this->bb_path.size()-1; i -- && !assigned; )
+ {
+ bb_idx = this->bb_path[i];
+ const auto& bb = mir_res.m_fcn.blocks.at(bb_idx);
+ stmt_idx = bb.statements.size();
+
+ visit_mir_lvalues(bb.terminator, visit_cb);
+
+ for(stmt_idx = bb.statements.size(); stmt_idx -- && !assigned; )
+ {
+ visit_mir_lvalues(bb.statements[stmt_idx], visit_cb);
+ }
+ }
+
+ if( !assigned )
+ {
+ // Value wasn't ever assigned, that's why it's not valid.
+ DEBUG("- Not assigned");
+ return InvalidReason { InvalidReason::Unwritten, 0, 0 };
+ }
+ }
+ // If neither of the above return a reason, check for blocks that don't have the value valid.
+ // TODO: This requires access to the lifetime bitmaps to know where it was invalidated
+ DEBUG("- (assume) lifetime invalidated [is_copy=" << is_copy << "]");
+ return InvalidReason { InvalidReason::Invalidated, 0, 0 };
+ }
+ void ensure_valid(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& root_lv, const State& vs, ::std::vector<unsigned int>& path) const
+ {
+ if( vs.is_composite() )
+ {
+ MIR_ASSERT(mir_res, vs.index-1 < this->inner_states.size(), "");
+ const auto& states = this->inner_states.at( vs.index - 1 );
+
+ path.push_back(0);
+ for(const auto& inner_vs : states)
+ {
+ ensure_valid(mir_res,root_lv, inner_vs, path);
+ path.back() ++;
+ }
+ path.pop_back();
+ }
+ else if( !vs.is_valid() )
+ {
+ // Locate where it was invalidated.
+ auto reason = find_invalid_reason(mir_res, root_lv);
+ MIR_BUG(mir_res, "Accessing invalidated lvalue - " << root_lv << " - " << FMT_CB(s,reason.fmt(s);) << " - field path=[" << path << "], BBs=[" << this->bb_path << "]");
+ }
+ else
+ {
+ }
+ }
+
+ public:
+ void move_lvalue(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& lv)
+ {
+ this->ensure_lvalue_valid(mir_res, lv);
+
+ ::HIR::TypeRef tmp;
+ const auto& ty = mir_res.get_lvalue_type(tmp, lv);
+ if( mir_res.m_resolve.type_is_copy(mir_res.sp, ty) )
+ {
+ // NOTE: Copy types aren't moved.
+ }
+ else
+ {
+ this->set_lvalue_state(mir_res, lv, State(false));
+ }
+ }
+ void mark_lvalue_valid(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& lv)
+ {
+ this->set_lvalue_state(mir_res, lv, State(true));
+ }
+
+ // Scan states and clear unused composite slots
+ void garbage_collect()
+ {
+ struct Marker {
+ ::std::vector<bool> used;
+
+ void mark_from_state(const ValueStates& vss, const State& s) {
+ if(s.is_composite()) {
+ used.at(s.index-1) = true;
+ for(const auto& s : vss.inner_states.at(s.index-1))
+ mark_from_state(vss, s);
+
+ // TODO: Should this compact composites with all-equal inner states?
+ }
+ }
+ };
+ Marker m;
+ m.used.resize(this->inner_states.size(), false);
+
+ for(const auto& s : this->vars)
+ m.mark_from_state(*this, s);
+ for(const auto& s : this->temporaries)
+ m.mark_from_state(*this, s);
+ for(const auto& s : this->arguments)
+ m.mark_from_state(*this, s);
+ m.mark_from_state(*this, this->return_value);
+ }
+ private:
+ State allocate_composite(unsigned int n_fields, State basis)
+ {
+ assert(n_fields > 0);
+ for(size_t i = 0; i < this->inner_states.size(); i ++)
+ {
+ if( this->inner_states[i].size() == 0 )
+ {
+ inner_states[i] = ::std::vector<State>(n_fields, basis);
+ return State(i);
+ }
+ }
+ auto idx = inner_states.size();
+ inner_states.push_back( ::std::vector<State>(n_fields, basis) );
+ return State(idx);
+ }
+
+ public:
+ ::std::vector<State>& get_composite(const ::MIR::TypeResolve& mir_res, const State& vs)
+ {
+ MIR_ASSERT(mir_res, vs.index-1 < this->inner_states.size(), "");
+ return this->inner_states.at( vs.index - 1 );
+ }
+ const ::std::vector<State>& get_composite(const ::MIR::TypeResolve& mir_res, const State& vs) const
+ {
+ MIR_ASSERT(mir_res, vs.index-1 < this->inner_states.size(), "");
+ return this->inner_states.at( vs.index - 1 );
+ }
+ State get_lvalue_state(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& lv) const
+ {
+ TU_MATCHA( (lv), (e),
+ (Variable,
+ return vars.at(e);
+ ),
+ (Temporary,
+ return temporaries.at(e.idx);
+ ),
+ (Argument,
+ return arguments.at(e.idx);
+ ),
+ (Static,
+ return State(true);
+ ),
+ (Return,
+ return return_value;
+ ),
+ (Field,
+ auto vs = get_lvalue_state(mir_res, *e.val);
+ if( vs.is_composite() )
+ {
+ const auto& states = this->get_composite(mir_res, vs);
+ MIR_ASSERT(mir_res, e.field_index < states.size(), "Field index out of range");
+ return states[e.field_index];
+ }
+ else
+ {
+ return vs;
+ }
+ ),
+ (Deref,
+ auto vs = get_lvalue_state(mir_res, *e.val);
+ if( vs.is_composite() )
+ {
+ MIR_TODO(mir_res, "Deref with composite state");
+ }
+ else
+ {
+ return vs;
+ }
+ ),
+ (Index,
+ auto vs_v = get_lvalue_state(mir_res, *e.val);
+ auto vs_i = get_lvalue_state(mir_res, *e.idx);
+ MIR_ASSERT(mir_res, !vs_v.is_composite(), "");
+ MIR_ASSERT(mir_res, !vs_i.is_composite(), "");
+ return State(vs_v.is_valid() && vs_i.is_valid());
+ ),
+ (Downcast,
+ auto vs_v = get_lvalue_state(mir_res, *e.val);
+ if( vs_v.is_composite() )
+ {
+ const auto& states = this->get_composite(mir_res, vs_v);
+ MIR_ASSERT(mir_res, states.size() == 1, "Downcast on composite of invalid size");
+ return states[0];
+ }
+ else
+ {
+ return vs_v;
+ }
+ )
+ )
+ throw "";
+ }
+
+ void set_lvalue_state(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& lv, State new_vs)
+ {
+ TU_MATCHA( (lv), (e),
+ (Variable,
+ vars.at(e) = new_vs;
+ ),
+ (Temporary,
+ temporaries.at(e.idx) = new_vs;
+ ),
+ (Argument,
+ arguments.at(e.idx) = new_vs;
+ ),
+ (Static,
+ // Ignore.
+ ),
+ (Return,
+ return_value = new_vs;
+ ),
+ (Field,
+ auto cur_vs = get_lvalue_state(mir_res, *e.val);
+ if( !cur_vs.is_composite() && cur_vs == new_vs )
+ {
+ // Not a composite, and no state change
+ }
+ else
+ {
+ if( !cur_vs.is_composite() )
+ {
+ ::HIR::TypeRef tmp;
+ const auto& ty = mir_res.get_lvalue_type(tmp, *e.val);
+ unsigned int n_fields = 0;
+ if( const auto* e = ty.m_data.opt_Tuple() )
+ {
+ n_fields = e->size();
+ }
+ else if( ty.m_data.is_Path() && ty.m_data.as_Path().binding.is_Struct() )
+ {
+ const auto& e = ty.m_data.as_Path().binding.as_Struct();
+ TU_MATCHA( (e->m_data), (se),
+ (Unit,
+ n_fields = 0;
+ ),
+ (Tuple,
+ n_fields = se.size();
+ ),
+ (Named,
+ n_fields = se.size();
+ )
+ )
+ }
+ else {
+ MIR_BUG(mir_res, "Unknown type being accessed with Field - " << ty);
+ }
+ cur_vs = this->allocate_composite(n_fields, cur_vs);
+ set_lvalue_state(mir_res, *e.val, cur_vs);
+ }
+ // Get composite state and assign into it
+ auto& states = this->get_composite(mir_res, cur_vs);
+ MIR_ASSERT(mir_res, e.field_index < states.size(), "Field index out of range");
+ states[e.field_index] = new_vs;
+ }
+ ),
+ (Deref,
+ auto cur_vs = get_lvalue_state(mir_res, *e.val);
+ if( !cur_vs.is_composite() && cur_vs == new_vs )
+ {
+ // Not a composite, and no state change
+ }
+ else
+ {
+ if( !cur_vs.is_composite() )
+ {
+ //::HIR::TypeRef tmp;
+ //const auto& ty = mir_res.get_lvalue_type(tmp, *e.val);
+ // TODO: Should this check if the type is Box?
+
+ cur_vs = this->allocate_composite(2, cur_vs);
+ set_lvalue_state(mir_res, *e.val, cur_vs);
+ }
+ // Get composite state and assign into it
+ auto& states = this->get_composite(mir_res, cur_vs);
+ MIR_ASSERT(mir_res, states.size() == 2, "Deref with invalid state list size");
+ states[1] = new_vs;
+ }
+ ),
+ (Index,
+ auto vs_v = get_lvalue_state(mir_res, *e.val);
+ auto vs_i = get_lvalue_state(mir_res, *e.idx);
+ MIR_ASSERT(mir_res, !vs_v.is_composite(), "");
+ MIR_ASSERT(mir_res, !vs_i.is_composite(), "");
+
+ MIR_ASSERT(mir_res, vs_v.is_valid(), "Indexing an invalid value");
+ MIR_ASSERT(mir_res, vs_i.is_valid(), "Indexing with an invalid index");
+
+ // NOTE: Ignore
+ ),
+ (Downcast,
+ auto cur_vs = get_lvalue_state(mir_res, *e.val);
+ if( !cur_vs.is_composite() && cur_vs == new_vs )
+ {
+ // Not a composite, and no state change
+ }
+ else
+ {
+ if( !cur_vs.is_composite() )
+ {
+ cur_vs = this->allocate_composite(1, cur_vs);
+ set_lvalue_state(mir_res, *e.val, cur_vs);
+ }
+ // Get composite state and assign into it
+ auto& states = this->get_composite(mir_res, cur_vs);
+ MIR_ASSERT(mir_res, states.size() == 1, "Downcast on composite of invalid size");
+ states[0] = new_vs;
+ }
+ )
+ )
+ }
+ };
+
+
+ struct StateSet
+ {
+ ::std::vector<ValueStates> known_state_sets;
+
+ bool add_state(const ValueStates& state_set)
+ {
+ for(const auto& s : this->known_state_sets)
+ {
+ if( s.is_equivalent_to(state_set) )
+ {
+ return false;
+ }
+ }
+ this->known_state_sets.push_back( state_set.clone() );
+ this->known_state_sets.back().bb_path = ::std::vector<unsigned int>();
+ return true;
+ }
+ };
+}
+
+::std::ostream& operator<<(::std::ostream& os, const StateFmt& x)
+{
+ if(x.s.index == 0) {
+ os << "_";
+ }
+ else if( x.s.index == ~0u ) {
+ os << "X";
+ }
+ else {
+ assert(x.s.index-1 < x.vss.inner_states.size());
+ const auto& is = x.vss.inner_states[x.s.index-1];
+ os << "[";
+ for(const auto& s : is)
+ os << StateFmt(x.vss, s);
+ os << "]";
+ }
+ return os;
+}
+
+namespace std {
+ ostream& operator<<(ostream& os, const ValueStates& x)
+ {
+ auto print_val = [&](auto tag, const State& s) {
+ if(s.is_composite()) {
+ os << tag << "=" << StateFmt(x,s);
+ }
+ else if( s.is_valid() ) {
+ os << tag;
+ }
+ else {
+ }
+ };
+
+ os << "ValueStates(path=[" << x.bb_path << "]";
+ print_val(",rv", x.return_value);
+ for(unsigned int i = 0; i < x.arguments.size(); i ++)
+ print_val(FMT_CB(ss, ss << ",a" << i;), x.arguments[i]);
+ for(unsigned int i = 0; i < x.vars.size(); i ++)
+ print_val(FMT_CB(ss, ss << ",_" << i;), x.vars[i]);
+ for(unsigned int i = 0; i < x.temporaries.size(); i ++)
+ print_val(FMT_CB(ss, ss << ",t" << i;), x.temporaries[i]);
+ for(unsigned int i = 0; i < x.drop_flags.size(); i++)
+ if(x.drop_flags[i])
+ os << ",df" << i;
+ os << ")";
+ return os;
+ }
+}
+
+
+// "Executes" the function, keeping track of drop flags and variable validities
+void MIR_Validate_FullValState(::MIR::TypeResolve& mir_res, const ::MIR::Function& fcn)
+{
+ // TODO: Use a timer to check elapsed CPU time in this function, and check on each iteration
+ // - If more than `n` (10?) seconds passes on one function, warn and abort
+ //ElapsedTimeCounter timer;
+ ::std::vector<StateSet> block_entry_states( fcn.blocks.size() );
+
+ // Determine value lifetimes (BBs in which Copy values are valid)
+ // - Used to mask out Copy value (prevents combinatorial explosion)
+ auto lifetimes = MIR_Helper_GetLifetimes(mir_res, fcn, /*dump_debug=*/true);
+ DEBUG(lifetimes.m_block_offsets);
+
+ ValueStates state;
+ state.arguments.resize( mir_res.m_args.size(), State(true) );
+ state.vars.resize( fcn.named_variables.size() );
+ state.temporaries.resize( fcn.temporaries.size() );
+ state.drop_flags = fcn.drop_flags;
+
+ ::std::vector< ::std::pair<unsigned int, ValueStates> > todo_queue;
+ todo_queue.push_back( ::std::make_pair(0, mv$(state)) );
+ while( ! todo_queue.empty() )
+ {
+ auto cur_block = todo_queue.back().first;
+ auto state = mv$(todo_queue.back().second);
+ todo_queue.pop_back();
+
+ // Mask off any values which aren't valid in the first statement of this block
+ {
+ for(unsigned i = 0; i < state.vars.size(); i ++)
+ {
+ /*if( !variables_copy[i] )
+ {
+ // Not Copy, don't apply masking
+ }
+ else*/ if( ! state.vars[i].is_valid() )
+ {
+ // Already invalid
+ }
+ else if( lifetimes.var_valid(i, cur_block, 0) )
+ {
+ // Expected to be valid in this block, leave as-is
+ }
+ else
+ {
+ // Copy value not used at/after this block, mask to false
+ DEBUG("BB" << cur_block << " - var$" << i << " - Outside lifetime, discard");
+ state.vars[i] = State(false);
+ }
+ }
+ for(unsigned i = 0; i < state.temporaries.size(); i ++)
+ {
+ /*if( !variables_copy[i] )
+ {
+ // Not Copy, don't apply masking
+ }
+ else*/ if( ! state.temporaries[i].is_valid() )
+ {
+ // Already invalid
+ }
+ else if( lifetimes.tmp_valid(i, cur_block, 0) )
+ {
+ // Expected to be valid in this block, leave as-is
+ }
+ else
+ {
+ // Copy value not used at/after this block, mask to false
+ DEBUG("BB" << cur_block << " - tmp$" << i << " - Outside lifetime, discard");
+ state.temporaries[i] = State(false);
+ }
+ }
+ }
+
+ // If this state already exists in the map, skip
+ if( ! block_entry_states[cur_block].add_state(state) )
+ {
+ DEBUG("BB" << cur_block << " - Nothing new");
+ continue ;
+ }
+ DEBUG("BB" << cur_block << " - " << state);
+ state.bb_path.push_back( cur_block );
+
+ const auto& blk = fcn.blocks.at(cur_block);
+ for(size_t i = 0; i < blk.statements.size(); i++)
+ {
+ mir_res.set_cur_stmt(cur_block, i);
+
+ DEBUG(mir_res << blk.statements[i]);
+
+ TU_MATCHA( (blk.statements[i]), (se),
+ (Assign,
+ #if ENABLE_LEAK_DETECTOR
+ // TODO: Check if the target isn't valid. Allow if either invalid, or too complex to know.
+ #endif
+ TU_MATCHA( (se.src), (ve),
+ (Use,
+ state.move_lvalue(mir_res, ve);
+ ),
+ (Constant,
+ ),
+ (SizedArray,
+ state.ensure_param_valid(mir_res, ve.val);
+ ),
+ (Borrow,
+ state.ensure_lvalue_valid(mir_res, ve.val);
+ ),
+ // Cast on primitives
+ (Cast,
+ state.ensure_lvalue_valid(mir_res, ve.val);
+ ),
+ // Binary operation on primitives
+ (BinOp,
+ state.ensure_param_valid(mir_res, ve.val_l);
+ state.ensure_param_valid(mir_res, ve.val_r);
+ ),
+ // Unary operation on primitives
+ (UniOp,
+ state.ensure_lvalue_valid(mir_res, ve.val);
+ ),
+ // Extract the metadata from a DST pointer
+ // NOTE: If used on an array, this yields the array size (for generics)
+ (DstMeta,
+ state.ensure_lvalue_valid(mir_res, ve.val);
+ ),
+ // Extract the pointer from a DST pointer (as *const ())
+ (DstPtr,
+ state.ensure_lvalue_valid(mir_res, ve.val);
+ ),
+ // Construct a DST pointer from a thin pointer and metadata
+ (MakeDst,
+ state.ensure_param_valid(mir_res, ve.ptr_val);
+ state.ensure_param_valid(mir_res, ve.meta_val);
+ ),
+ (Tuple,
+ for(const auto& v : ve.vals)
+ if(const auto* e = v.opt_LValue())
+ state.move_lvalue(mir_res, *e);
+ ),
+ // Array literal
+ (Array,
+ for(const auto& v : ve.vals)
+ if(const auto* e = v.opt_LValue())
+ state.move_lvalue(mir_res, *e);
+ ),
+ // Create a new instance of a union (and eventually enum)
+ (Variant,
+ if(const auto* e = ve.val.opt_LValue())
+ state.move_lvalue(mir_res, *e);
+ ),
+ // Create a new instance of a struct (or enum)
+ (Struct,
+ for(const auto& v : ve.vals)
+ if(const auto* e = v.opt_LValue())
+ state.move_lvalue(mir_res, *e);
+ )
+ )
+ state.mark_lvalue_valid(mir_res, se.dst);
+ ),
+ (Asm,
+ for(const auto& v : se.inputs)
+ state.ensure_lvalue_valid(mir_res, v.second);
+ for(const auto& v : se.outputs)
+ state.mark_lvalue_valid(mir_res, v.second);
+ ),
+ (SetDropFlag,
+ if( se.other == ~0u )
+ {
+ state.drop_flags[se.idx] = se.new_val;
+ }
+ else
+ {
+ state.drop_flags[se.idx] = (se.new_val != state.drop_flags[se.other]);
+ }
+ ),
+ (Drop,
+ if( se.flag_idx == ~0u || state.drop_flags.at(se.flag_idx) )
+ {
+ if( se.kind == ::MIR::eDropKind::SHALLOW )
+ {
+ // HACK: A move out of a Box generates the following pattern: `[[[[X_]]X]]`
+ // - Ensure that that is the pattern we're seeing here.
+ auto vs = state.get_lvalue_state(mir_res, se.slot);
+
+ MIR_ASSERT(mir_res, vs.index != ~0u, "Shallow drop on fully-valid value - " << se.slot);
+
+ // Box<T> - Wrapper around Unique<T>
+ MIR_ASSERT(mir_res, vs.is_composite(), "Shallow drop on non-composite state - " << se.slot << " (state=" << StateFmt(state,vs) << ")");
+ const auto& sub_states = state.get_composite(mir_res, vs);
+ MIR_ASSERT(mir_res, sub_states.size() == 1, "");
+ // Unique<T> - NonZero<*const T>, PhantomData<T>
+ MIR_ASSERT(mir_res, sub_states[0].is_composite(), "");
+ const auto& sub_states2 = state.get_composite(mir_res, sub_states[0]);
+ MIR_ASSERT(mir_res, sub_states2.size() == 2, "- " << StateFmt(state, sub_states[0]));
+ MIR_ASSERT(mir_res, sub_states2[0].is_composite(), "");
+ MIR_ASSERT(mir_res, sub_states2[1].is_valid(), "");
+ // `NonZero<*const T>` - *const T
+ const auto& sub_states3 = state.get_composite(mir_res, sub_states2[0]);
+ MIR_ASSERT(mir_res, sub_states3.size() == 1, "- " << StateFmt(state, sub_states2[0]));
+ MIR_ASSERT(mir_res, sub_states3[0].is_composite(), "");
+ // `*const T` - Moved out of, so has a composite state
+ const auto& sub_states4 = state.get_composite(mir_res, sub_states3[0]);
+ MIR_ASSERT(mir_res, sub_states4.size() == 2, "- " << StateFmt(state, sub_states3[0]));
+ MIR_ASSERT(mir_res, sub_states4[0].is_valid(), "Shallow drop on deallocated Box - " << se.slot << " (state=" << StateFmt(state,vs) << ")");
+ // TODO: This is leak protection, enable it once the rest works
+ if( ENABLE_LEAK_DETECTOR )
+ {
+ MIR_ASSERT(mir_res, !sub_states4[1].is_valid(), "Shallow drop on populated Box - " << se.slot << " (state=" << StateFmt(state,vs) << ")");
+ }
+
+ state.set_lvalue_state(mir_res, se.slot, State(false));
+ }
+ else
+ {
+ state.move_lvalue(mir_res, se.slot);
+ }
+ }
+ ),
+ (ScopeEnd,
+ // TODO: Mark all mentioned variables as invalid
+ )
+ )
+ }
+
+ state.garbage_collect();
+
+ mir_res.set_cur_stmt_term(cur_block);
+ DEBUG(mir_res << " " << blk.terminator);
+ TU_MATCHA( (blk.terminator), (te),
+ (Incomplete,
+ ),
+ (Return,
+ state.ensure_lvalue_valid(mir_res, ::MIR::LValue::make_Return({}));
+ if( ENABLE_LEAK_DETECTOR )
+ {
+ auto ensure_dropped = [&](const State& s, const ::MIR::LValue& lv) {
+ if( s.is_valid() ) {
+ // Check if !Copy
+ ::HIR::TypeRef tmp;
+ const auto& ty = mir_res.get_lvalue_type(tmp, lv);
+ if( mir_res.m_resolve.type_is_copy(mir_res.sp, ty) ) {
+ }
+ else {
+ MIR_BUG(mir_res, "Value " << lv << " was not dropped at end of function");
+ }
+ }
+ };
+ for(unsigned i = 0; i < state.arguments.size(); i ++ ) {
+ ensure_dropped(state.arguments[i], ::MIR::LValue::make_Argument({i}));
+ }
+ for(unsigned i = 0; i < state.vars.size(); i ++ ) {
+ ensure_dropped(state.vars[i], ::MIR::LValue::make_Variable(i));
+ }
+ }
+ ),
+ (Diverge,
+ ),
+ (Goto, // Jump to another block
+ todo_queue.push_back( ::std::make_pair(te, mv$(state)) );
+ ),
+ (Panic,
+ todo_queue.push_back( ::std::make_pair(te.dst, mv$(state)) );
+ ),
+ (If,
+ state.ensure_lvalue_valid(mir_res, te.cond);
+ todo_queue.push_back( ::std::make_pair(te.bb0, state.clone()) );
+ todo_queue.push_back( ::std::make_pair(te.bb1, mv$(state)) );
+ ),
+ (Switch,
+ state.ensure_lvalue_valid(mir_res, te.val);
+ for(size_t i = 0; i < te.targets.size(); i ++)
+ {
+ todo_queue.push_back( ::std::make_pair(te.targets[i], i == te.targets.size()-1 ? mv$(state) : state.clone()) );
+ }
+ ),
+ (Call,
+ if(const auto* e = te.fcn.opt_Value())
+ {
+ state.ensure_lvalue_valid(mir_res, *e);
+ }
+ for(auto& arg : te.args)
+ {
+ if(const auto* e = arg.opt_LValue())
+ {
+ state.move_lvalue(mir_res, *e);
+ }
+ }
+ todo_queue.push_back( ::std::make_pair(te.panic_block, state.clone()) );
+ state.mark_lvalue_valid(mir_res, te.ret_val);
+ todo_queue.push_back( ::std::make_pair(te.ret_block, mv$(state)) );
+ )
+ )
+ }
+}
+
+void MIR_Validate_Full(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path, const ::MIR::Function& fcn, const ::HIR::Function::args_t& args, const ::HIR::TypeRef& ret_type)
+{
+ TRACE_FUNCTION_F(path);
+ Span sp;
+ ::MIR::TypeResolve state { sp, resolve, FMT_CB(ss, ss << path;), ret_type, args, fcn };
+ // Full validation: exhaustively track value init/move state across every control path
+
+ MIR_Validate_FullValState(state, fcn);
+}
+
+// -------- Crate-wide entry point: run full state validation on all MIR --------
+
+void MIR_CheckCrate_Full(/*const*/ ::HIR::Crate& crate)
+{
+ ::MIR::OuterVisitor ov(crate, [](const auto& res, const auto& p, auto& expr, const auto& args, const auto& ty)
+ {
+ MIR_Validate_Full(res, p, *expr.m_mir, args, ty);
+ }
+ );
+ ov.visit_crate( crate );
+}
+
diff --git a/src/mir/cleanup.cpp b/src/mir/cleanup.cpp
index 6516dd60..17dca948 100644
--- a/src/mir/cleanup.cpp
+++ b/src/mir/cleanup.cpp
@@ -932,6 +932,8 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
),
(SetDropFlag,
),
+ (ScopeEnd,
+ ),
(Asm,
for(auto& v : se.inputs)
MIR_Cleanup_LValue(state, mutator, v.second);
diff --git a/src/mir/dump.cpp b/src/mir/dump.cpp
index f3dee478..b4177295 100644
--- a/src/mir/dump.cpp
+++ b/src/mir/dump.cpp
@@ -9,6 +9,7 @@
#include <hir/visitor.hpp>
#include "mir.hpp"
#include "operations.hpp"
+#include <iomanip>
namespace {
@@ -98,6 +99,14 @@ namespace {
m_os << " IF df$" << e.flag_idx;
}
m_os << ");\n";
+ ),
+ (ScopeEnd,
+ m_os << "// Scope End: ";
+ for(auto idx : e.vars)
+ m_os << "var$" << idx << ",";
+ for(auto idx : e.tmps)
+ m_os << "tmp$" << idx << ",";
+ m_os << "\n";
)
)
}
@@ -208,7 +217,19 @@ namespace {
os << (ce.v ? "true" : "false");
),
(Bytes,
- os << "b\"" << ce << "\"";
+ os << ::std::hex << "b\"";
+ for(auto b : ce)
+ {
+ if( b == '\\' )
+ os << "\\\\";
+ else if( b == '"' )
+ os << "\\\"";
+ else if( ' ' <= b && b < 0x7F )
+ os << b;
+ else
+ os << "\\x" << ::std::setw(2) << ::std::setfill('0') << (int)b;
+ }
+ os << ::std::dec << "\"";
),
(StaticString,
os << "\"" << ce << "\"";
@@ -494,6 +515,30 @@ namespace {
m_os << indent() << " ;\n";
}
}
+ void visit_constant(::HIR::ItemPath p, ::HIR::Constant& item) override
+ {
+ m_os << indent();
+ m_os << "const ";
+ if( m_short_item_name )
+ m_os << p.get_name();
+ else
+ m_os << p;
+ m_os << ": " << item.m_type;
+ if( item.m_value )
+ {
+ inc_indent();
+ m_os << " = {\n";
+ inc_indent();
+ dump_mir(m_os, m_indent_level, *item.m_value.m_mir);
+ dec_indent();
+ m_os << indent() << "} /* = " << item.m_value_res << "*/;\n";
+ dec_indent();
+ }
+ else
+ {
+ m_os << ";\n";
+ }
+ }
void visit_static(::HIR::ItemPath p, ::HIR::Static& item) override
{
m_os << indent();
@@ -506,7 +551,7 @@ namespace {
if( item.m_value )
{
inc_indent();
- m_os << "= {\n";
+ m_os << " = {\n";
inc_indent();
dump_mir(m_os, m_indent_level, *item.m_value.m_mir);
dec_indent();
diff --git a/src/mir/from_hir.cpp b/src/mir/from_hir.cpp
index e62722ef..ceda0a87 100644
--- a/src/mir/from_hir.cpp
+++ b/src/mir/from_hir.cpp
@@ -21,6 +21,26 @@
namespace {
+ template<typename T>
+ struct SaveAndEditVal {
+ T& m_dst;
+ T m_saved;
+ SaveAndEditVal(T& dst, T newval):
+ m_dst(dst),
+ m_saved(dst)
+ {
+ m_dst = mv$(newval);
+ }
+ ~SaveAndEditVal()
+ {
+ this->m_dst = this->m_saved;
+ }
+ };
+ template<typename T>
+ SaveAndEditVal<T> save_and_edit(T& dst, typename ::std::remove_reference<T&>::type newval) {
+ return SaveAndEditVal<T> { dst, mv$(newval) };
+ }
+
class ExprVisitor_Conv:
public MirConverter
{
@@ -36,6 +56,11 @@ namespace {
};
::std::vector<LoopDesc> m_loop_stack;
+ const ScopeHandle* m_block_tmp_scope = nullptr;
+ const ScopeHandle* m_borrow_raise_target = nullptr;
+ const ScopeHandle* m_stmt_scope = nullptr;
+ bool m_in_borrow = false;
+
public:
ExprVisitor_Conv(MirBuilder& builder, const ::std::vector< ::HIR::TypeRef>& var_types):
m_builder(builder),
@@ -153,11 +178,22 @@ namespace {
m_builder.push_stmt_assign( sp, ::MIR::LValue::make_Variable(pat.m_binding.m_slot), mv$(lval) );
break;
case ::HIR::PatternBinding::Type::Ref:
+ if(m_borrow_raise_target)
+ {
+ DEBUG("- Raising destructure borrow of " << lval << " to scope " << *m_borrow_raise_target);
+ m_builder.raise_variables(sp, lval, *m_borrow_raise_target);
+ }
+
m_builder.push_stmt_assign( sp, ::MIR::LValue::make_Variable(pat.m_binding.m_slot), ::MIR::RValue::make_Borrow({
0, ::HIR::BorrowType::Shared, mv$(lval)
}) );
break;
case ::HIR::PatternBinding::Type::MutRef:
+ if(m_borrow_raise_target)
+ {
+ DEBUG("- Raising destructure borrow of " << lval << " to scope " << *m_borrow_raise_target);
+ m_builder.raise_variables(sp, lval, *m_borrow_raise_target);
+ }
m_builder.push_stmt_assign( sp, ::MIR::LValue::make_Variable(pat.m_binding.m_slot), ::MIR::RValue::make_Borrow({
0, ::HIR::BorrowType::Unique, mv$(lval)
}) );
@@ -331,7 +367,7 @@ namespace {
if( e.extra_bind.is_valid() )
{
// 1. Obtain remaining length
- auto sub_val = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::Constant::make_Uint({ e.leading.size() + e.trailing.size(), ::HIR::CoreType::Usize }));
+ auto sub_val = ::MIR::Param(::MIR::Constant::make_Uint({ e.leading.size() + e.trailing.size(), ::HIR::CoreType::Usize }));
::MIR::LValue len_val = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_BinOp({ len_lval.clone(), ::MIR::eBinOp::SUB, mv$(sub_val) }) );
// 2. Obtain pointer to element
@@ -370,79 +406,88 @@ namespace {
{
TRACE_FUNCTION_F("_Block");
// NOTE: This doesn't create a BB, as BBs are not needed for scoping
- if( node.m_nodes.size() > 0 )
- {
- bool diverged = false;
+ bool diverged = false;
- auto scope = m_builder.new_scope_var(node.span());
+ auto res_val = (node.m_value_node ? m_builder.new_temporary(node.m_res_type) : ::MIR::LValue());
+ auto scope = m_builder.new_scope_var(node.span());
+ auto tmp_scope = m_builder.new_scope_temp(node.span());
+ auto _block_tmp_scope = save_and_edit(m_block_tmp_scope, &tmp_scope);
- for(unsigned int i = 0; i < node.m_nodes.size() - (node.m_yields_final ? 1 : 0); i ++)
- {
- auto& subnode = node.m_nodes[i];
- const Span& sp = subnode->span();
+ for(unsigned int i = 0; i < node.m_nodes.size(); i ++)
+ {
+ auto _ = save_and_edit(m_borrow_raise_target, nullptr);
+ auto& subnode = node.m_nodes[i];
+ const Span& sp = subnode->span();
- auto stmt_scope = m_builder.new_scope_temp(sp);
- this->visit_node_ptr(subnode);
+ auto stmt_scope = m_builder.new_scope_temp(sp);
+ auto _stmt_scope_push = save_and_edit(m_stmt_scope, &stmt_scope);
+ this->visit_node_ptr(subnode);
- if( m_builder.block_active() || m_builder.has_result() ) {
- // TODO: Emit a drop
- m_builder.get_result(sp);
- m_builder.terminate_scope(sp, mv$(stmt_scope));
- }
- else {
- m_builder.terminate_scope(sp, mv$(stmt_scope), false);
+ if( m_builder.block_active() || m_builder.has_result() ) {
+ // TODO: Emit a drop
+ m_builder.get_result(sp);
+ m_builder.terminate_scope(sp, mv$(stmt_scope));
+ }
+ else {
+ m_builder.terminate_scope(sp, mv$(stmt_scope), false);
- m_builder.set_cur_block( m_builder.new_bb_unlinked() );
- diverged = true;
- }
+ m_builder.set_cur_block( m_builder.new_bb_unlinked() );
+ diverged = true;
}
+ }
- // For the last node, specially handle.
- if( node.m_yields_final )
- {
- auto& subnode = node.m_nodes.back();
- const Span& sp = subnode->span();
+ // For the last node, specially handle.
+ // TODO: Any temporaries defined within this node must be elevated into the parent scope
+ if( node.m_value_node )
+ {
+ auto& subnode = node.m_value_node;
+ const Span& sp = subnode->span();
- auto res_val = m_builder.new_temporary(node.m_res_type);
- auto stmt_scope = m_builder.new_scope_temp(sp);
- this->visit_node_ptr(subnode);
- if( m_builder.has_result() || m_builder.block_active() )
- {
- ASSERT_BUG(sp, m_builder.block_active(), "Result yielded, but no active block");
- ASSERT_BUG(sp, m_builder.has_result(), "Active block but no result yeilded");
- // PROBLEM: This can drop the result before we want to use it.
+ auto stmt_scope = m_builder.new_scope_temp(sp);
+ this->visit_node_ptr(subnode);
+ if( m_builder.has_result() || m_builder.block_active() )
+ {
+ ASSERT_BUG(sp, m_builder.block_active(), "Result yielded, but no active block");
+ ASSERT_BUG(sp, m_builder.has_result(), "Active block but no result yeilded");
+ // PROBLEM: This can drop the result before we want to use it.
- m_builder.push_stmt_assign(sp, res_val.clone(), m_builder.get_result(sp));
+ m_builder.push_stmt_assign(sp, res_val.clone(), m_builder.get_result(sp));
- m_builder.terminate_scope(sp, mv$(stmt_scope));
- m_builder.terminate_scope( node.span(), mv$(scope) );
- m_builder.set_result( node.span(), mv$(res_val) );
+ // If this block is part of a statement, raise all temporaries from this final scope to the enclosing scope
+ if( m_stmt_scope )
+ {
+ m_builder.raise_all(sp, mv$(stmt_scope), *m_stmt_scope);
+ //m_builder.terminate_scope(sp, mv$(stmt_scope));
}
else
{
- m_builder.terminate_scope( node.span(), mv$(stmt_scope), false );
- m_builder.terminate_scope( node.span(), mv$(scope), false );
- // Block diverged in final node.
+ m_builder.terminate_scope(sp, mv$(stmt_scope));
}
+ m_builder.set_result( node.span(), mv$(res_val) );
}
else
{
- if( diverged )
- {
- m_builder.terminate_scope( node.span(), mv$(scope), false );
- m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
- // Don't set a result if there's no block.
- }
- else
- {
- m_builder.terminate_scope( node.span(), mv$(scope) );
- m_builder.set_result(node.span(), ::MIR::RValue::make_Tuple({}));
- }
+ m_builder.terminate_scope( sp, mv$(stmt_scope), false );
+ // Block diverged in final node.
}
+ m_builder.terminate_scope( node.span(), mv$(tmp_scope), m_builder.block_active() );
+ m_builder.terminate_scope( node.span(), mv$(scope), m_builder.block_active() );
}
else
{
- m_builder.set_result(node.span(), ::MIR::RValue::make_Tuple({}));
+ if( diverged )
+ {
+ m_builder.terminate_scope( node.span(), mv$(tmp_scope), false );
+ m_builder.terminate_scope( node.span(), mv$(scope), false );
+ m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
+ // Don't set a result if there's no block.
+ }
+ else
+ {
+ m_builder.terminate_scope( node.span(), mv$(tmp_scope) );
+ m_builder.terminate_scope( node.span(), mv$(scope) );
+ m_builder.set_result(node.span(), ::MIR::RValue::make_Tuple({}));
+ }
}
}
void visit(::HIR::ExprNode_Asm& node) override
@@ -477,23 +522,25 @@ namespace {
}
void visit(::HIR::ExprNode_Let& node) override
{
- TRACE_FUNCTION_F("_Let");
+ TRACE_FUNCTION_F("_Let " << node.m_pattern);
this->define_vars_from(node.span(), node.m_pattern);
if( node.m_value )
{
+ auto _ = save_and_edit(m_borrow_raise_target, m_block_tmp_scope);
this->visit_node_ptr(node.m_value);
if( ! m_builder.block_active() ) {
return ;
}
+ auto res = m_builder.get_result(node.span());
if( node.m_pattern.m_binding.is_valid() && node.m_pattern.m_data.is_Any() && node.m_pattern.m_binding.m_type == ::HIR::PatternBinding::Type::Move )
{
- m_builder.push_stmt_assign( node.span(), ::MIR::LValue::make_Variable(node.m_pattern.m_binding.m_slot), m_builder.get_result(node.span()) );
+ m_builder.push_stmt_assign( node.span(), ::MIR::LValue::make_Variable(node.m_pattern.m_binding.m_slot), mv$(res) );
}
else
{
- this->destructure_from(node.span(), node.m_pattern, m_builder.get_result_in_lvalue(node.m_value->span(), node.m_type));
+ this->destructure_from(node.span(), node.m_pattern, m_builder.lvalue_or_temp(node.m_value->span(), node.m_type, mv$(res)));
}
}
m_builder.set_result(node.span(), ::MIR::RValue::make_Tuple({}));
@@ -505,6 +552,10 @@ namespace {
auto loop_block = m_builder.new_bb_linked();
auto loop_next = m_builder.new_bb_unlinked();
+ auto loop_tmp_scope = m_builder.new_scope_temp(node.span());
+ auto _ = save_and_edit(m_stmt_scope, &loop_tmp_scope);
+
+ // TODO: `continue` in a loop should jump to the cleanup, not the top
m_loop_stack.push_back( LoopDesc { mv$(loop_body_scope), node.m_label, loop_block, loop_next } );
this->visit_node_ptr(node.m_code);
auto loop_scope = mv$(m_loop_stack.back().scope);
@@ -513,7 +564,7 @@ namespace {
// If there's a stray result, drop it
if( m_builder.has_result() ) {
assert( m_builder.block_active() );
- // TODO: Properly drop this? Or just discard it?
+ // TODO: Properly drop this? Or just discard it? It should be ()
m_builder.get_result(node.span());
}
// Terminate block with a jump back to the start
@@ -522,12 +573,14 @@ namespace {
{
DEBUG("- Reached end, loop back");
// Insert drop of all scopes within the current scope
+ m_builder.terminate_scope( node.span(), mv$(loop_tmp_scope) );
m_builder.terminate_scope( node.span(), mv$(loop_scope) );
m_builder.end_block( ::MIR::Terminator::make_Goto(loop_block) );
}
else
{
// Terminate scope without emitting cleanup (cleanup was handled by `break`)
+ m_builder.terminate_scope( node.span(), mv$(loop_tmp_scope), false );
m_builder.terminate_scope( node.span(), mv$(loop_scope), false );
}
@@ -579,6 +632,8 @@ namespace {
void visit(::HIR::ExprNode_Match& node) override
{
TRACE_FUNCTION_FR("_Match", "_Match");
+ auto _ = save_and_edit(m_borrow_raise_target, nullptr);
+ //auto stmt_scope = m_builder.new_scope_temp(node.span());
this->visit_node_ptr(node.m_value);
auto match_val = m_builder.get_result_in_lvalue(node.m_value->span(), node.m_value->m_res_type);
@@ -608,7 +663,7 @@ namespace {
if( m_builder.block_active() ) {
auto res = m_builder.get_result(arm.m_code->span());
- m_builder.raise_variables( arm.m_code->span(), res, scope );
+ m_builder.raise_variables( arm.m_code->span(), res, scope, /*to_above=*/true);
m_builder.set_result(arm.m_code->span(), mv$(res));
m_builder.terminate_scope( node.span(), mv$(tmp_scope) );
@@ -622,6 +677,19 @@ namespace {
else {
MIR_LowerHIR_Match(m_builder, *this, node, mv$(match_val));
}
+
+ if( m_builder.block_active() ) {
+ const auto& sp = node.span();
+
+ auto res = m_builder.get_result(sp);
+ //m_builder.raise_variables(sp, res, stmt_scope, /*to_above=*/true);
+ m_builder.set_result(sp, mv$(res));
+
+ //m_builder.terminate_scope( node.span(), mv$(stmt_scope) );
+ }
+ else {
+ //m_builder.terminate_scope( node.span(), mv$(stmt_scope), false );
+ }
} // ExprNode_Match
void emit_if(/*const*/ ::HIR::ExprNodeP& cond, ::MIR::BasicBlockId true_branch, ::MIR::BasicBlockId false_branch)
@@ -691,7 +759,7 @@ namespace {
auto scope = m_builder.new_scope_temp( cond->span() );
this->visit_node_ptr(*cond_p);
ASSERT_BUG(cond->span(), cond->m_res_type == ::HIR::CoreType::Bool, "If condition wasn't a bool");
- decision_val = m_builder.get_result_in_lvalue(cond->span(), ::HIR::CoreType::Bool);
+ decision_val = m_builder.get_result_in_if_cond(cond->span());
m_builder.terminate_scope(cond->span(), mv$(scope));
}
@@ -760,7 +828,7 @@ namespace {
m_builder.set_result( node.span(), mv$(result_val) );
}
- void generate_checked_binop(const Span& sp, ::MIR::LValue res_slot, ::MIR::eBinOp op, ::MIR::LValue val_l, const ::HIR::TypeRef& ty_l, ::MIR::LValue val_r, const ::HIR::TypeRef& ty_r)
+ void generate_checked_binop(const Span& sp, ::MIR::LValue res_slot, ::MIR::eBinOp op, ::MIR::Param val_l, const ::HIR::TypeRef& ty_l, ::MIR::Param val_r, const ::HIR::TypeRef& ty_r)
{
switch(op)
{
@@ -870,7 +938,16 @@ namespace {
if( node.m_op != ::HIR::ExprNode_Assign::Op::None )
{
auto dst_clone = dst.clone();
- auto val_lv = m_builder.lvalue_or_temp( node.span(), ty_val, mv$(val) );
+ ::MIR::Param val_p;
+ if( auto* e = val.opt_Use() ) {
+ val_p = mv$(*e);
+ }
+ else if( auto* e = val.opt_Constant() ) {
+ val_p = mv$(*e);
+ }
+ else {
+ val_p = m_builder.lvalue_or_temp( node.span(), ty_val, mv$(val) );
+ }
ASSERT_BUG(sp, ty_slot.m_data.is_Primitive(), "Assignment operator overloads are only valid on primitives - ty_slot="<<ty_slot);
ASSERT_BUG(sp, ty_val.m_data.is_Primitive(), "Assignment operator overloads are only valid on primitives - ty_val="<<ty_val);
@@ -885,16 +962,16 @@ namespace {
case _(Mul): op = ::MIR::eBinOp::MUL; if(0)
case _(Div): op = ::MIR::eBinOp::DIV; if(0)
case _(Mod): op = ::MIR::eBinOp::MOD;
- this->generate_checked_binop(sp, mv$(dst), op, mv$(dst_clone), ty_slot, mv$(val_lv), ty_val);
+ this->generate_checked_binop(sp, mv$(dst), op, mv$(dst_clone), ty_slot, mv$(val_p), ty_val);
break;
case _(Xor): op = ::MIR::eBinOp::BIT_XOR; if(0)
case _(Or ): op = ::MIR::eBinOp::BIT_OR ; if(0)
case _(And): op = ::MIR::eBinOp::BIT_AND;
- this->generate_checked_binop(sp, mv$(dst), op, mv$(dst_clone), ty_slot, mv$(val_lv), ty_val);
+ this->generate_checked_binop(sp, mv$(dst), op, mv$(dst_clone), ty_slot, mv$(val_p), ty_val);
break;
case _(Shl): op = ::MIR::eBinOp::BIT_SHL; if(0)
case _(Shr): op = ::MIR::eBinOp::BIT_SHR;
- this->generate_checked_binop(sp, mv$(dst), op, mv$(dst_clone), ty_slot, mv$(val_lv), ty_val);
+ this->generate_checked_binop(sp, mv$(dst), op, mv$(dst_clone), ty_slot, mv$(val_p), ty_val);
break;
}
#undef _
@@ -916,12 +993,12 @@ namespace {
const auto& ty_r = node.m_right->m_res_type;
auto res = m_builder.new_temporary(node.m_res_type);
- this->visit_node_ptr(node.m_left);
- auto left = m_builder.get_result_in_lvalue(node.m_left->span(), ty_l);
-
// Short-circuiting boolean operations
if( node.m_op == ::HIR::ExprNode_BinOp::Op::BoolAnd || node.m_op == ::HIR::ExprNode_BinOp::Op::BoolOr )
{
+ this->visit_node_ptr(node.m_left);
+ auto left = m_builder.get_result_in_lvalue(node.m_left->span(), ty_l);
+
auto bb_next = m_builder.new_bb_unlinked();
auto bb_true = m_builder.new_bb_unlinked();
auto bb_false = m_builder.new_bb_unlinked();
@@ -963,8 +1040,10 @@ namespace {
{
}
+ this->visit_node_ptr(node.m_left);
+ auto left = m_builder.get_result_in_param(node.m_left->span(), ty_l);
this->visit_node_ptr(node.m_right);
- auto right = m_builder.get_result_in_lvalue(node.m_right->span(), ty_r);
+ auto right = m_builder.get_result_in_param(node.m_right->span(), ty_r);
::MIR::eBinOp op;
switch(node.m_op)
@@ -1069,13 +1148,19 @@ namespace {
{
TRACE_FUNCTION_F("_Borrow");
+ auto _ = save_and_edit(m_in_borrow, true);
+
const auto& ty_val = node.m_value->m_res_type;
this->visit_node_ptr(node.m_value);
auto val = m_builder.get_result_in_lvalue(node.m_value->span(), ty_val);
- auto res = m_builder.new_temporary(node.m_res_type);
- m_builder.push_stmt_assign( node.span(), res.as_Temporary(), ::MIR::RValue::make_Borrow({ 0, node.m_type, mv$(val) }));
- m_builder.set_result( node.span(), mv$(res) );
+ if( m_borrow_raise_target )
+ {
+ DEBUG("- Raising borrow to scope " << *m_borrow_raise_target);
+ m_builder.raise_variables(node.span(), val, *m_borrow_raise_target);
+ }
+
+ m_builder.set_result( node.span(), ::MIR::RValue::make_Borrow({ 0, node.m_type, mv$(val) }) );
}
void visit(::HIR::ExprNode_Cast& node) override
{
@@ -1233,8 +1318,9 @@ namespace {
}
else
{
- // Probably an error.
- TODO(node.span(), "MIR _Unsize to " << ty_out);
+ // Probably an error?
+ m_builder.set_result( node.span(), ::MIR::RValue::make_Cast({ mv$(ptr_lval), node.m_res_type.clone() }) );
+ //TODO(node.span(), "MIR _Unsize to " << ty_out);
}
),
(Slice,
@@ -1345,10 +1431,63 @@ namespace {
if( m_builder.is_type_owned_box( ty_val ) )
{
// Box magically derefs.
- // HACK: Break out of the switch used for TU_MATCH_DEF
- break;
}
- BUG(sp, "Deref on unsupported type - " << ty_val);
+ else
+ {
+ // TODO: Do operator replacement here after handling scope-raising for _Borrow
+ if( m_borrow_raise_target && m_in_borrow )
+ {
+ DEBUG("- Raising deref in borrow to scope " << *m_borrow_raise_target);
+ m_builder.raise_variables(node.span(), val, *m_borrow_raise_target);
+ }
+
+
+ const char* langitem = nullptr;
+ const char* method = nullptr;
+ ::HIR::BorrowType bt;
+ // - Uses the value's usage because for T: Copy node.m_value->m_usage is Borrow, but node.m_usage is Move
+ switch( node.m_value->m_usage )
+ {
+ case ::HIR::ValueUsage::Unknown:
+ BUG(sp, "Unknown usage type of deref value");
+ break;
+ case ::HIR::ValueUsage::Borrow:
+ bt = ::HIR::BorrowType::Shared;
+ langitem = method = "deref";
+ break;
+ case ::HIR::ValueUsage::Mutate:
+ bt = ::HIR::BorrowType::Unique;
+ langitem = method = "deref_mut";
+ break;
+ case ::HIR::ValueUsage::Move:
+ TODO(sp, "ValueUsage::Move for desugared Deref of " << node.m_value->m_res_type);
+ break;
+ }
+ // Needs replacement, continue
+ assert(langitem);
+ assert(method);
+
+ // - Construct trait path - Index*<IdxTy>
+ auto method_path = ::HIR::Path(ty_val.clone(), ::HIR::GenericPath(m_builder.resolve().m_crate.get_lang_item_path(node.span(), langitem), {}), method);
+
+ // Store a borrow of the input value
+ ::std::vector<::MIR::Param> args;
+ args.push_back( m_builder.lvalue_or_temp(sp,
+ ::HIR::TypeRef::new_borrow(bt, node.m_value->m_res_type.clone()),
+ ::MIR::RValue::make_Borrow({0, bt, mv$(val)})
+ ) );
+ m_builder.moved_lvalue(node.span(), args[0].as_LValue());
+ val = m_builder.new_temporary(::HIR::TypeRef::new_borrow(bt, node.m_res_type.clone()));
+ // Call the above trait method
+ // Store result of that call in `val` (which will be derefed below)
+ auto ok_block = m_builder.new_bb_unlinked();
+ auto panic_block = m_builder.new_bb_unlinked();
+ m_builder.end_block(::MIR::Terminator::make_Call({ ok_block, panic_block, val.clone(), mv$(method_path), mv$(args) }));
+ m_builder.set_cur_block(panic_block);
+ m_builder.end_block(::MIR::Terminator::make_Diverge({}));
+
+ m_builder.set_cur_block(ok_block);
+ }
),
(Pointer,
// Deref on a pointer - TODO: Requires unsafe
@@ -1413,6 +1552,7 @@ namespace {
// TODO: Proper panic handling, including scope destruction
m_builder.set_cur_block(place__panic);
+ //m_builder.terminate_scope_early( node.span(), m_builder.fcn_scope() );
// TODO: Drop `place`
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
m_builder.set_cur_block(place__ok);
@@ -1437,6 +1577,7 @@ namespace {
// TODO: Proper panic handling, including scope destruction
m_builder.set_cur_block(place_raw__panic);
+ //m_builder.terminate_scope_early( node.span(), m_builder.fcn_scope() );
// TODO: Drop `place`
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
m_builder.set_cur_block(place_raw__ok);
@@ -1473,11 +1614,13 @@ namespace {
// TODO: Proper panic handling, including scope destruction
m_builder.set_cur_block(res__panic);
+ //m_builder.terminate_scope_early( node.span(), m_builder.fcn_scope() );
// TODO: Should this drop the value written to the rawptr?
+ // - No, because it's likely invalid now. Goodbye!
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
m_builder.set_cur_block(res__ok);
+ m_builder.mark_value_assigned(node.span(), res);
m_builder.set_result( node.span(), mv$(res) );
}
@@ -1521,20 +1664,29 @@ namespace {
for(auto& arg : args)
{
this->visit_node_ptr(arg);
-
- if( args.size() == 1 )
+ if( !m_builder.block_active() )
+ {
+ auto tmp = m_builder.new_temporary(arg->m_res_type);
+ values.push_back( mv$(tmp) );
+ }
+ else if( args.size() == 1 )
{
values.push_back( m_builder.get_result_in_param(arg->span(), arg->m_res_type, /*allow_missing_value=*/true) );
}
else
{
- // NOTE: Have to allocate a new temporary because ordering matters
- auto tmp = m_builder.new_temporary(arg->m_res_type);
- if( m_builder.block_active() )
+ auto res = m_builder.get_result(arg->span());
+ if( auto* e = res.opt_Constant() )
{
- m_builder.push_stmt_assign( arg->span(), tmp.clone(), m_builder.get_result(arg->span()) );
+ values.push_back( mv$(*e) );
+ }
+ else
+ {
+ // NOTE: Have to allocate a new temporary because ordering matters
+ auto tmp = m_builder.new_temporary(arg->m_res_type);
+ m_builder.push_stmt_assign( arg->span(), tmp.clone(), mv$(res) );
+ values.push_back( mv$(tmp) );
}
- values.push_back( mv$(tmp) );
}
if(const auto* e = values.back().opt_LValue() )
@@ -1548,6 +1700,7 @@ namespace {
void visit(::HIR::ExprNode_CallPath& node) override
{
TRACE_FUNCTION_F("_CallPath " << node.m_path);
+ auto _ = save_and_edit(m_borrow_raise_target, nullptr);
auto values = get_args(node.m_args);
auto panic_block = m_builder.new_bb_unlinked();
@@ -1615,6 +1768,7 @@ namespace {
void visit(::HIR::ExprNode_CallValue& node) override
{
TRACE_FUNCTION_F("_CallValue " << node.m_value->m_res_type);
+ auto _ = save_and_edit(m_borrow_raise_target, nullptr);
// _CallValue is ONLY valid on function pointers (all others must be desugared)
ASSERT_BUG(node.span(), node.m_value->m_res_type.m_data.is_Function(), "Leftover _CallValue on a non-fn()");
@@ -1638,6 +1792,8 @@ namespace {
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
m_builder.set_cur_block( next_block );
+ // TODO: Support diverging value calls
+ m_builder.mark_value_assigned(node.span(), res);
m_builder.set_result( node.span(), mv$(res) );
}
void visit(::HIR::ExprNode_CallMethod& node) override
@@ -1897,6 +2053,7 @@ namespace {
::MIR::LValue base_val;
if( node.m_base_value )
{
+ DEBUG("_StructLiteral - base");
this->visit_node_ptr(node.m_base_value);
base_val = m_builder.get_result_in_lvalue(node.m_base_value->span(), node.m_base_value->m_res_type);
}
@@ -1935,6 +2092,7 @@ namespace {
auto idx = ::std::find_if(fields.begin(), fields.end(), [&](const auto&x){ return x.first == ent.first; }) - fields.begin();
assert( !values_set[idx] );
values_set[idx] = true;
+ DEBUG("_StructLiteral - fld '" << ent.first << "' (idx " << idx << ")");
this->visit_node_ptr(valnode);
auto res = m_builder.get_result(valnode->span());
@@ -2035,6 +2193,7 @@ namespace {
void visit(::HIR::ExprNode_Closure& node) override
{
TRACE_FUNCTION_F("_Closure - " << node.m_obj_path);
+ auto _ = save_and_edit(m_borrow_raise_target, nullptr);
::std::vector< ::MIR::Param> vals;
vals.reserve( node.m_captures.size() );
@@ -2082,6 +2241,8 @@ namespace {
root_node.visit( ev );
}
+ MIR_Validate(resolve, path, fcn, args, ptr->m_res_type);
+
return ::MIR::FunctionPointer(new ::MIR::Function(mv$(fcn)));
}
diff --git a/src/mir/from_hir.hpp b/src/mir/from_hir.hpp
index 1b57a7f4..6b10d5bd 100644
--- a/src/mir/from_hir.hpp
+++ b/src/mir/from_hir.hpp
@@ -36,6 +36,11 @@ public:
ScopeHandle& operator=(const ScopeHandle& x) = delete;
ScopeHandle& operator=(ScopeHandle&& x) = delete;
~ScopeHandle();
+
+ friend ::std::ostream& operator<<(::std::ostream& os, const ScopeHandle& x) {
+ os << x.idx;
+ return os;
+ }
};
// - Needs to handle future DerefMove (which can't use the Box hack)
@@ -48,11 +53,15 @@ enum class InvalidType {
TAGGED_UNION_EX(VarState, (), Invalid, (
// Currently invalid
(Invalid, InvalidType),
- // Partially valid (Map of field states, Box is assumed to have one field)
+ // Partially valid (Map of field states)
(Partial, struct {
::std::vector<VarState> inner_states;
unsigned int outer_flag; // If ~0u there's no condition on the outer
}),
+ (MovedOut, struct {
+ ::std::unique_ptr<VarState> inner_state;
+ unsigned int outer_flag;
+ }),
// Optionally valid (integer indicates the drop flag index)
(Optional, unsigned int),
// Fully valid
@@ -99,6 +108,14 @@ TAGGED_UNION(ScopeType, Variables,
})
);
+enum class VarGroup
+{
+ Return,
+ Argument,
+ Variable,
+ Temporary,
+};
+
/// Helper class to construct MIR
class MirBuilder
{
@@ -118,7 +135,8 @@ class MirBuilder
bool m_result_valid;
// TODO: Extra information.
- //::std::vector<VarState> m_arg_states;
+ VarState m_return_state;
+ ::std::vector<VarState> m_arg_states;
::std::vector<VarState> m_variable_states;
::std::vector<VarState> m_temporary_states;
@@ -142,6 +160,11 @@ class MirBuilder
::std::vector<ScopeDef> m_scopes;
::std::vector<unsigned int> m_scope_stack;
ScopeHandle m_fcn_scope;
+
+ // LValue used only for the condition of `if`
+ // - Using a fixed temporary simplifies parts of lowering (scope related) and reduces load on
+ // the optimiser.
+ ::MIR::LValue m_if_cond_lval;
public:
MirBuilder(const Span& sp, const StaticTraitResolve& resolve, const ::HIR::Function::args_t& args, ::MIR::Function& output);
~MirBuilder();
@@ -169,6 +192,17 @@ public:
/// Obtains a result in a param (or a lvalue)
::MIR::Param get_result_in_param(const Span& sp, const ::HIR::TypeRef& ty, bool allow_missing_value=false);
+ ::MIR::LValue get_if_cond() const {
+ return m_if_cond_lval.clone();
+ }
+ ::MIR::LValue get_rval_in_if_cond(const Span& sp, ::MIR::RValue val) {
+ push_stmt_assign(sp, m_if_cond_lval.clone(), mv$(val));
+ return m_if_cond_lval.clone();
+ }
+ ::MIR::LValue get_result_in_if_cond(const Span& sp) {
+ return get_rval_in_if_cond(sp, get_result(sp));
+ }
+
// - Statements
// Push an assignment. NOTE: This also marks the rvalue as moved
void push_stmt_assign(const Span& sp, ::MIR::LValue dst, ::MIR::RValue val);
@@ -181,6 +215,9 @@ public:
// Push a setting/clearing of a drop flag
void push_stmt_set_dropflag_val(const Span& sp, unsigned int index, bool value);
void push_stmt_set_dropflag_other(const Span& sp, unsigned int index, unsigned int other);
+ void push_stmt_set_dropflag_default(const Span& sp, unsigned int index);
+
+ void push_stmt(const Span& sp, ::MIR::Statement stmt);
// - Block management
bool block_active() const {
@@ -190,12 +227,13 @@ public:
// Mark a value as initialised (used for Call, because it has to be done after the panic block is populated)
void mark_value_assigned(const Span& sp, const ::MIR::LValue& val);
- // Moves control of temporaries up to the next scope
- void raise_variables(const Span& sp, const ::MIR::LValue& val, const ScopeHandle& scope);
- void raise_variables(const Span& sp, const ::MIR::RValue& rval, const ScopeHandle& scope);
+ // Moves control of temporaries up to the specified scope (or to above it)
+ void raise_variables(const Span& sp, const ::MIR::LValue& val, const ScopeHandle& scope, bool to_above=false);
+ void raise_variables(const Span& sp, const ::MIR::RValue& rval, const ScopeHandle& scope, bool to_above=false);
void set_cur_block(unsigned int new_block);
::MIR::BasicBlockId pause_cur_block();
+
void end_block(::MIR::Terminator term);
::MIR::BasicBlockId new_bb_linked();
@@ -210,9 +248,16 @@ public:
ScopeHandle new_scope_temp(const Span& sp);
ScopeHandle new_scope_split(const Span& sp);
ScopeHandle new_scope_loop(const Span& sp);
+
+ /// Raises every variable defined in the source scope into the target scope
+ void raise_all(const Span& sp, ScopeHandle src, const ScopeHandle& target);
+ /// Drop all defined values in the scope (emits the drops if `cleanup` is set)
void terminate_scope(const Span& sp, ScopeHandle , bool cleanup=true);
+ /// Terminates a scope early (e.g. via return/break/...)
void terminate_scope_early(const Span& sp, const ScopeHandle& , bool loop_exit=false);
+ /// Marks the end of a split arm (end match arm, if body, ...)
void end_split_arm(const Span& sp, const ScopeHandle& , bool reachable);
+ /// Terminates the current split early (TODO: What does this mean?)
void end_split_arm_early(const Span& sp);
const ScopeHandle& fcn_scope() const {
@@ -224,11 +269,17 @@ public:
// Helper - Marks a variable/... as moved (and checks if the move is valid)
void moved_lvalue(const Span& sp, const ::MIR::LValue& lv);
private:
+ const VarState& get_slot_state(const Span& sp, VarGroup ty, unsigned int idx, unsigned int skip_count=0) const;
+ VarState& get_slot_state_mut(const Span& sp, VarGroup ty, unsigned int idx);
+
const VarState& get_variable_state(const Span& sp, unsigned int idx, unsigned int skip_count=0) const;
VarState& get_variable_state_mut(const Span& sp, unsigned int idx);
const VarState& get_temp_state(const Span& sp, unsigned int idx, unsigned int skip_count=0) const;
VarState& get_temp_state_mut(const Span& sp, unsigned int idx);
+ const VarState& get_val_state(const Span& sp, const ::MIR::LValue& lv, unsigned int skip_count=0);
+ VarState& get_val_state_mut(const Span& sp, const ::MIR::LValue& lv);
+
void terminate_loop_early(const Span& sp, ScopeType::Data_Loop& sd_loop);
void drop_value_from_state(const Span& sp, const VarState& vs, ::MIR::LValue lv);
diff --git a/src/mir/from_hir_match.cpp b/src/mir/from_hir_match.cpp
index aa9825cd..46331ca6 100644
--- a/src/mir/from_hir_match.cpp
+++ b/src/mir/from_hir_match.cpp
@@ -33,24 +33,35 @@ struct field_path_t
};
TAGGED_UNION_EX(PatternRule, (), Any,(
- // _ pattern
- (Any, struct {}),
// Enum variant
(Variant, struct { unsigned int idx; ::std::vector<PatternRule> sub_rules; }),
// Slice (includes desired length)
(Slice, struct { unsigned int len; ::std::vector<PatternRule> sub_rules; }),
// SplitSlice
// TODO: How can the negative offsets in the `trailing` be handled correctly? (both here and in the destructure)
- (SplitSlice, struct { unsigned int min_len; ::std::vector<PatternRule> leading, trailing; }),
+ (SplitSlice, struct { unsigned int min_len; unsigned int trailing_len; ::std::vector<PatternRule> leading, trailing; }),
// Boolean (different to Constant because of how restricted it is)
(Bool, bool),
// General value
(Value, ::MIR::Constant),
- (ValueRange, struct { ::MIR::Constant first, last; })
+ (ValueRange, struct { ::MIR::Constant first, last; }),
+ // _ pattern
+ (Any, struct {})
),
( , field_path(mv$(x.field_path)) ), (field_path = mv$(x.field_path);),
(
field_path_t field_path;
+
+ bool operator<(const PatternRule& x) const {
+ return this->ord(x) == OrdLess;
+ }
+ bool operator==(const PatternRule& x) const {
+ return this->ord(x) == OrdEqual;
+ }
+ bool operator!=(const PatternRule& x) const {
+ return this->ord(x) != OrdEqual;
+ }
+ Ordering ord(const PatternRule& x) const;
)
);
::std::ostream& operator<<(::std::ostream& os, const PatternRule& x);
@@ -74,11 +85,14 @@ struct ArmCode {
::MIR::BasicBlockId cond_end;
::MIR::LValue cond_lval;
::std::vector< ::MIR::BasicBlockId> destructures; // NOTE: Incomplete
+
+ mutable ::MIR::BasicBlockId cond_fail_tgt = 0;
};
typedef ::std::vector<PatternRuleset> t_arm_rules;
void MIR_LowerHIR_Match_Simple( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNode_Match& node, ::MIR::LValue match_val, t_arm_rules arm_rules, ::std::vector<ArmCode> arm_code, ::MIR::BasicBlockId first_cmp_block);
+void MIR_LowerHIR_Match_Grouped( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNode_Match& node, ::MIR::LValue match_val, t_arm_rules arm_rules, ::std::vector<ArmCode> arms_code, ::MIR::BasicBlockId first_cmp_block );
void MIR_LowerHIR_Match_DecisionTree( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNode_Match& node, ::MIR::LValue match_val, t_arm_rules arm_rules, ::std::vector<ArmCode> arm_code , ::MIR::BasicBlockId first_cmp_block);
/// Helper to construct rules from a passed pattern
struct PatternRulesetBuilder
@@ -103,6 +117,82 @@ struct PatternRulesetBuilder
void push_rule(PatternRule r);
};
+class RulesetRef
+{
+ ::std::vector<PatternRuleset>* m_rules_vec = nullptr;
+ RulesetRef* m_parent = nullptr;
+ size_t m_parent_ofs=0; // If len == 0, this is the inner index, else it's the base
+ size_t m_parent_len=0;
+public:
+ RulesetRef(::std::vector<PatternRuleset>& rules):
+ m_rules_vec(&rules)
+ {
+ }
+ RulesetRef(RulesetRef& parent, size_t start, size_t n):
+ m_parent(&parent),
+ m_parent_ofs(start),
+ m_parent_len(n)
+ {
+ }
+ RulesetRef(RulesetRef& parent, size_t idx):
+ m_parent(&parent),
+ m_parent_ofs(idx)
+ {
+ }
+
+ size_t size() const {
+ if( m_rules_vec ) {
+ return m_rules_vec->size();
+ }
+ else if( m_parent_len ) {
+ return m_parent_len;
+ }
+ else {
+ return m_parent->size();
+ }
+ }
+ RulesetRef slice(size_t s, size_t n) {
+ return RulesetRef(*this, s, n);
+ }
+
+ const ::std::vector<PatternRule>& operator[](size_t i) const {
+ if( m_rules_vec ) {
+ return (*m_rules_vec)[i].m_rules;
+ }
+ else if( m_parent_len ) {
+ return (*m_parent)[m_parent_ofs + i];
+ }
+ else {
+ // Fun part - Indexes into inner patterns
+ const auto& parent_rule = (*m_parent)[i][m_parent_ofs];
+ if(const auto* re = parent_rule.opt_Variant()) {
+ return re->sub_rules;
+ }
+ else {
+ throw "TODO";
+ }
+ }
+ }
+ void swap(size_t a, size_t b) {
+ TRACE_FUNCTION_F(a << ", " << b);
+ if( m_rules_vec ) {
+ ::std::swap( (*m_rules_vec)[a], (*m_rules_vec)[b] );
+ }
+ else {
+ assert(m_parent);
+ if( m_parent_len ) {
+ m_parent->swap(m_parent_ofs + a, m_parent_ofs + b);
+ }
+ else {
+ m_parent->swap(a, b);
+ }
+ }
+ }
+};
+
+void sort_rulesets(RulesetRef rulesets, size_t idx=0);
+void sort_rulesets_inner(RulesetRef rulesets, size_t idx);
+
// --------------------------------------------------------------------
// CODE
// --------------------------------------------------------------------
@@ -282,8 +372,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
auto tmp_scope = builder.new_scope_temp(arm.m_cond->span());
conv.visit_node_ptr( arm.m_cond );
- ac.cond_lval = builder.get_result_in_lvalue(arm.m_cond->span(), ::HIR::TypeRef(::HIR::CoreType::Bool));
- // NOTE: Terminating the scope slightly early is safe, because the resulting boolean temp isn't invalidated.
+ ac.cond_lval = builder.get_result_in_if_cond(arm.m_cond->span());
builder.terminate_scope( arm.m_code->span(), mv$(tmp_scope) );
ac.cond_end = builder.pause_cur_block();
@@ -366,20 +455,55 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
for(const auto& arm_rule : arm_rules)
{
- DEBUG("> (" << arm_rule.arm_idx << ", " << arm_rule.pat_idx << ") - " << arm_rule.m_rules);
+ DEBUG("> (" << arm_rule.arm_idx << ", " << arm_rule.pat_idx << ") - " << arm_rule.m_rules
+ << (arm_code[arm_rule.arm_idx].has_condition ? " (cond)" : ""));
}
- // TODO: Don't generate inner code until decisions are generated (keeps MIR flow nice)
+ // TODO: Remove columns that are all `_`?
+ // - Ideally, only accessible structures would be fully destructured like this, making this check redundant
+
+ // Sort rules using the following restrictions:
+ // - A rule cannot be reordered across an item that has an overlapping match set
+ // > e.g. nothing can cross _
+ // > equal rules cannot be reordered
+ // > Values cannot cross ranges that contain the value
+ // > This will have to be a bubble sort to ensure that it's correctly stable.
+ sort_rulesets(arm_rules);
+ DEBUG("Post-sort");
+ for(const auto& arm_rule : arm_rules)
+ {
+ DEBUG("> (" << arm_rule.arm_idx << ", " << arm_rule.pat_idx << ") - " << arm_rule.m_rules
+ << (arm_code[arm_rule.arm_idx].has_condition ? " (cond)" : ""));
+ }
+ // De-duplicate arms (emitting a warning when it happens)
+ // - This allows later code to assume that duplicate arms are a codegen bug.
+ if( ! arm_rules.empty() )
+ {
+ for(auto it = arm_rules.begin()+1; it != arm_rules.end(); )
+ {
+ // If duplicate rule, (and neither is conditional)
+ if( (it-1)->m_rules == it->m_rules && !arm_code[it->arm_idx].has_condition && !arm_code[(it-1)->arm_idx].has_condition )
+ {
+ // Remove
+ it = arm_rules.erase(it);
+ WARNING(node.m_arms[it->arm_idx].m_code->span(), W0000, "Duplicate match pattern, unreachable code");
+ }
+ else
+ {
+ ++ it;
+ }
+ }
+ }
- // TODO: Detect if a rule is ordering-dependent. In this case we currently have to fall back on the simple match code
- // - A way would be to search for `_` rules with non _ rules following. Would false-positive in some cases, but shouldn't false negative
- // TODO: Merge equal rulesets if there's one with no condition.
+ // TODO: Don't generate inner code until decisions are generated (keeps MIR flow nice)
+ // - Challenging, as the decision code needs somewhere to jump to.
+ // - Allocating a BB and then rewriting references to it is a possibility.
if( fall_back_on_simple ) {
MIR_LowerHIR_Match_Simple( builder, conv, node, mv$(match_val), mv$(arm_rules), mv$(arm_code), first_cmp_block );
}
else {
- MIR_LowerHIR_Match_DecisionTree( builder, conv, node, mv$(match_val), mv$(arm_rules), mv$(arm_code), first_cmp_block );
+ MIR_LowerHIR_Match_Grouped( builder, conv, node, mv$(match_val), mv$(arm_rules), mv$(arm_code), first_cmp_block );
}
builder.set_cur_block( next_block );
@@ -424,6 +548,56 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
return os;
}
+::Ordering PatternRule::ord(const PatternRule& x) const
+{
+ if(tag() != x.tag())
+ {
+ return tag() < x.tag() ? ::OrdLess : ::OrdGreater;
+ }
+ TU_MATCHA( (*this, x), (te, xe),
+ (Any, return OrdEqual;),
+ (Variant,
+ if(te.idx != xe.idx) return ::ord(te.idx, xe.idx);
+ assert( te.sub_rules.size() == xe.sub_rules.size() );
+ for(unsigned int i = 0; i < te.sub_rules.size(); i ++)
+ {
+ auto cmp = te.sub_rules[i].ord( xe.sub_rules[i] );
+ if( cmp != ::OrdEqual )
+ return cmp;
+ }
+ return ::OrdEqual;
+ ),
+ (Slice,
+ if(te.len != xe.len) return ::ord(te.len, xe.len);
+ // Wait? Why would the rule count be the same?
+ assert( te.sub_rules.size() == xe.sub_rules.size() );
+ for(unsigned int i = 0; i < te.sub_rules.size(); i ++)
+ {
+ auto cmp = te.sub_rules[i].ord( xe.sub_rules[i] );
+ if( cmp != ::OrdEqual )
+ return cmp;
+ }
+ return ::OrdEqual;
+ ),
+ (SplitSlice,
+ auto rv = ::ord( te.leading, xe.leading );
+ if(rv != OrdEqual) return rv;
+ return ::ord(te.trailing, xe.trailing);
+ ),
+ (Bool,
+ return ::ord( te, xe );
+ ),
+ (Value,
+ return ::ord( te, xe );
+ ),
+ (ValueRange,
+ if( te.first != xe.first )
+ return ::ord(te.first, xe.first);
+ return ::ord(te.last, xe.last);
+ )
+ )
+ throw "";
+}
::Ordering PatternRuleset::rule_is_before(const PatternRule& l, const PatternRule& r)
{
if( l.tag() != r.tag() ) {
@@ -635,6 +809,7 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
PatternRulesetBuilder sub_builder { this->m_resolve };
sub_builder.m_field_path = m_field_path;
+ sub_builder.m_field_path.push_back(var_idx);
sub_builder.m_field_path.push_back(0);
TU_MATCH( ::HIR::Enum::Variant, (var_def.second), (fields_def),
@@ -1074,6 +1249,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
const auto& fields_def = var_def.second.as_Tuple();
PatternRulesetBuilder sub_builder { this->m_resolve };
sub_builder.m_field_path = m_field_path;
+ sub_builder.m_field_path.push_back(pe.binding_idx);
sub_builder.m_field_path.push_back(0);
for( unsigned int i = 0; i < pe.sub_patterns.size(); i ++ )
{
@@ -1107,6 +1283,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
// 2. Iterate this list and recurse on the patterns
PatternRulesetBuilder sub_builder { this->m_resolve };
sub_builder.m_field_path = m_field_path;
+ sub_builder.m_field_path.push_back(pe.binding_idx);
sub_builder.m_field_path.push_back(0);
for( unsigned int i = 0; i < tmp.size(); i ++ )
{
@@ -1212,12 +1389,14 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
sub_builder.m_field_path.back() = 0;
if( pe.trailing.size() )
{
+ // Needs a way of encoding the negative offset in the field path
TODO(sp, "SplitSlice on [T] with trailing - " << pat);
}
auto trailing = mv$(sub_builder.m_rules);
this->push_rule( PatternRule::make_SplitSlice({
static_cast<unsigned int>(pe.leading.size() + pe.trailing.size()),
+ static_cast<unsigned int>(pe.trailing.size()),
mv$(leading), mv$(trailing)
}) );
)
@@ -1281,6 +1460,227 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
}
namespace {
+ // Order rules ignoring inner rules
+ Ordering ord_rule_compatible(const PatternRule& a, const PatternRule& b)
+ {
+ if(a.tag() != b.tag())
+ return ::ord( (unsigned)a.tag(), b.tag() );
+
+ TU_MATCHA( (a, b), (ae, be),
+ (Any,
+ return OrdEqual;
+ ),
+ (Variant,
+ return ::ord(ae.idx, be.idx);
+ ),
+ (Slice,
+ return ::ord(ae.len, be.len);
+ ),
+ (SplitSlice,
+ auto v = ::ord(ae.leading.size(), be.leading.size());
+ if(v != OrdEqual) return v;
+ v = ::ord(ae.trailing.size(), be.trailing.size());
+ if(v != OrdEqual) return v;
+ return OrdEqual;
+ ),
+ (Bool,
+ return ::ord(ae, be);
+ ),
+ (Value,
+ return ::ord(ae, be);
+ ),
+ (ValueRange,
+ auto v = ::ord(ae.first, be.first);
+ if(v != OrdEqual) return v;
+ return ::ord(ae.last, be.last);
+ )
+ )
+ throw "";
+ }
+ bool rule_compatible(const PatternRule& a, const PatternRule& b)
+ {
+ return ord_rule_compatible(a,b) == OrdEqual;
+ }
+
+ bool rules_overlap(const PatternRule& a, const PatternRule& b)
+ {
+ if( a.is_Any() || b.is_Any() )
+ return true;
+
+ // Defensive: If a constant is encountered, assume it overlaps with anything
+ if(const auto* ae = a.opt_Value()) {
+ if(ae->is_Const())
+ return true;
+ }
+ if(const auto* be = b.opt_Value()) {
+ if(be->is_Const())
+ return true;
+ }
+
+ // Value Range: Overlaps with contained values.
+ if(const auto* ae = a.opt_ValueRange() )
+ {
+ if(const auto* be = b.opt_Value() )
+ {
+ return ( ae->first <= *be && *be <= ae->last );
+ }
+ else if( const auto* be = b.opt_ValueRange() )
+ {
+ // Start of B within A
+ if( ae->first <= be->first && be->first <= ae->last )
+ return true;
+ // End of B within A
+ if( ae->first <= be->last && be->last <= ae->last )
+ return true;
+ // Start of A within B
+ if( be->first <= ae->first && ae->first <= be->last )
+ return true;
+ // End of A within B
+ if( be->first <= ae->last && ae->last <= be->last )
+ return true;
+
+ // Disjoint
+ return false;
+ }
+ else
+ {
+ TODO(Span(), "Check overlap of " << a << " and " << b);
+ }
+ }
+ if(const auto* be = b.opt_ValueRange())
+ {
+ if(const auto* ae = a.opt_Value() )
+ {
+ return (be->first <= *ae && *ae <= be->last);
+ }
+ // Note: A can't be ValueRange
+ else
+ {
+ TODO(Span(), "Check overlap of " << a << " and " << b);
+ }
+ }
+
+ // SplitSlice patterns overlap with other SplitSlice patterns and larger slices
+ if(const auto* ae = a.opt_SplitSlice())
+ {
+ if( b.is_SplitSlice() )
+ {
+ return true;
+ }
+ else if( const auto* be = b.opt_Slice() )
+ {
+ return be->len >= ae->min_len;
+ }
+ else
+ {
+ TODO(Span(), "Check overlap of " << a << " and " << b);
+ }
+ }
+ if(const auto* be = b.opt_SplitSlice())
+ {
+ if( const auto* ae = a.opt_Slice() )
+ {
+ return ae->len >= be->min_len;
+ }
+ else
+ {
+ TODO(Span(), "Check overlap of " << a << " and " << b);
+ }
+ }
+
+ // Otherwise, If rules are approximately equal, they overlap
+ return ( ord_rule_compatible(a, b) == OrdEqual );
+ }
+}
+void sort_rulesets(RulesetRef rulesets, size_t idx)
+{
+ if(rulesets.size() < 2)
+ return ;
+
+ bool found_non_any = false;
+ for(size_t i = 0; i < rulesets.size(); i ++)
+ if( !rulesets[i][idx].is_Any() )
+ found_non_any = true;
+ if( found_non_any )
+ {
+ TRACE_FUNCTION_F(idx);
+ for(size_t i = 0; i < rulesets.size(); i ++)
+ DEBUG("- " << i << ": " << rulesets[i]);
+
+ bool action_taken;
+ do
+ {
+ action_taken = false;
+ for(size_t i = 0; i < rulesets.size()-1; i ++)
+ {
+ if( rules_overlap(rulesets[i][idx], rulesets[i+1][idx]) )
+ {
+ // Don't move
+ }
+ else if( ord_rule_compatible(rulesets[i][idx], rulesets[i+1][idx]) == OrdGreater )
+ {
+ rulesets.swap(i, i+1);
+ action_taken = true;
+ }
+ else
+ {
+ }
+ }
+ } while(action_taken);
+ for(size_t i = 0; i < rulesets.size(); i ++)
+ DEBUG("- " << i << ": " << rulesets[i]);
+
+ // TODO: Print sorted ruleset
+
+ // Where compatible, sort insides
+ size_t start = 0;
+ for(size_t i = 1; i < rulesets.size(); i++)
+ {
+ if( ord_rule_compatible(rulesets[i][idx], rulesets[start][idx]) != OrdEqual )
+ {
+ sort_rulesets_inner(rulesets.slice(start, i-start), idx);
+ start = i;
+ }
+ }
+ sort_rulesets_inner(rulesets.slice(start, rulesets.size()-start), idx);
+
+ // Iterate onwards where rules are equal
+ if( idx + 1 < rulesets[0].size() )
+ {
+ size_t start = 0;
+ for(size_t i = 1; i < rulesets.size(); i++)
+ {
+ if( rulesets[i][idx] != rulesets[start][idx] )
+ {
+ sort_rulesets(rulesets.slice(start, i-start), idx+1);
+ start = i;
+ }
+ }
+ sort_rulesets(rulesets.slice(start, rulesets.size()-start), idx+1);
+ }
+ }
+ else
+ {
+ if( idx + 1 < rulesets[0].size() )
+ {
+ sort_rulesets(rulesets, idx + 1);
+ }
+ }
+}
+void sort_rulesets_inner(RulesetRef rulesets, size_t idx)
+{
+ TRACE_FUNCTION_F(idx << " - " << rulesets[0][idx].tag_str());
+ if( const auto* re = rulesets[0][idx].opt_Variant() )
+ {
+ // Sort rules based on contents of enum
+ if( re->sub_rules.size() > 0 )
+ {
+ sort_rulesets(RulesetRef(rulesets, idx), 0);
+ }
+ }
+}
+
+namespace {
void get_ty_and_val(
const Span& sp, const StaticTraitResolve& resolve,
const ::HIR::TypeRef& top_ty, const ::MIR::LValue& top_val,
@@ -1296,7 +1696,7 @@ namespace {
ASSERT_BUG(sp, field_path_ofs <= field_path.size(), "Field path offset " << field_path_ofs << " is larger than the path [" << field_path << "]");
for(unsigned int i = field_path_ofs; i < field_path.size(); i ++ )
{
- auto idx = field_path.data[i];
+ unsigned idx = field_path.data[i];
TU_MATCHA( (cur_ty->m_data), (e),
(Infer, BUG(sp, "Ivar for in match type"); ),
@@ -1378,7 +1778,43 @@ namespace {
lval = ::MIR::LValue::make_Downcast({ box$(lval), idx });
),
(Enum,
- BUG(sp, "Destructuring an enum - " << *cur_ty);
+ auto monomorph_to_ptr = [&](const auto& ty)->const auto* {
+ if( monomorphise_type_needed(ty) ) {
+ auto rv = monomorphise_type(sp, pbe->m_params, e.path.m_data.as_Generic().m_params, ty);
+ resolve.expand_associated_types(sp, rv);
+ tmp_ty = mv$(rv);
+ return &tmp_ty;
+ }
+ else {
+ return &ty;
+ }
+ };
+ ASSERT_BUG(sp, idx < pbe->m_variants.size(), "Variant index (" << idx << ") out of range (" << pbe->m_variants.size() << ") for enum " << *cur_ty);
+ const auto& var = pbe->m_variants[idx];
+
+ i++;
+ assert(i < field_path.data.size());
+ unsigned fld_idx = field_path.data[i];
+
+ TU_MATCHA( (var.second), (e),
+ (Unit,
+ BUG(sp, "Unit variant being destructured");
+ ),
+ (Value,
+ BUG(sp, "Value variant being destructured");
+ ),
+ (Tuple,
+ ASSERT_BUG(sp, fld_idx < e.size(), "Variant field index (" << fld_idx << ") out of range (" << e.size() << ") for enum " << *cur_ty << "::" << var.first);
+ cur_ty = monomorph_to_ptr(e[fld_idx].ent);
+ ),
+ (Struct,
+ ASSERT_BUG(sp, fld_idx < e.size(), "Variant field index (" << fld_idx << ") out of range (" << e.size() << ") for enum " << *cur_ty << "::" << var.first);
+ cur_ty = monomorph_to_ptr(e[fld_idx].second.ent);
+ )
+ )
+ DEBUG("*cur_ty = " << *cur_ty);
+ lval = ::MIR::LValue::make_Downcast({ box$(lval), idx });
+ lval = ::MIR::LValue::make_Field({ box$(lval), fld_idx });
)
)
),
@@ -1402,12 +1838,15 @@ namespace {
),
(Borrow,
ASSERT_BUG(sp, idx == FIELD_DEREF, "Destructure of borrow doesn't correspond to a deref in the path");
+ DEBUG(i << " " << *cur_ty << " - " << cur_ty << " " << &tmp_ty);
if( cur_ty == &tmp_ty ) {
- tmp_ty = mv$(*tmp_ty.m_data.as_Borrow().inner);
+ auto ip = mv$(tmp_ty.m_data.as_Borrow().inner);
+ tmp_ty = mv$(*ip);
}
else {
cur_ty = &*e.inner;
}
+ DEBUG(i << " " << *cur_ty);
lval = ::MIR::LValue::make_Deref({ box$(lval) });
),
(Pointer,
@@ -1437,7 +1876,6 @@ void MIR_LowerHIR_Match_Simple( MirBuilder& builder, MirConverter& conv, ::HIR::
TRACE_FUNCTION;
// 1. Generate pattern matches
- unsigned int rule_idx = 0;
builder.set_cur_block( first_cmp_block );
for( unsigned int arm_idx = 0; arm_idx < node.m_arms.size(); arm_idx ++ )
{
@@ -1451,6 +1889,10 @@ void MIR_LowerHIR_Match_Simple( MirBuilder& builder, MirConverter& conv, ::HIR::
if( arm_code.destructures[i] == 0 )
continue ;
+ size_t rule_idx = 0;
+ for(; rule_idx < arm_rules.size(); rule_idx++)
+ if( arm_rules[rule_idx].arm_idx == arm_idx && arm_rules[rule_idx].pat_idx == i )
+ break;
const auto& pat_rule = arm_rules[rule_idx];
bool is_last_pat = (i+1 == arm.m_patterns.size());
auto next_pattern_bb = (!is_last_pat ? builder.new_bb_unlinked() : next_arm_bb);
@@ -1478,8 +1920,6 @@ void MIR_LowerHIR_Match_Simple( MirBuilder& builder, MirConverter& conv, ::HIR::
{
builder.set_cur_block( next_pattern_bb );
}
-
- rule_idx ++;
}
if( arm_code.has_condition )
{
@@ -1549,12 +1989,28 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
auto succ_bb = builder.new_bb_unlinked();
auto test_val = ::MIR::Param( ::MIR::Constant::make_Uint({ re.as_Uint().v, te }));
- auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, mv$(test_val) }));
- builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), succ_bb, fail_bb }) );
+ builder.push_stmt_assign(sp, builder.get_if_cond(), ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, mv$(test_val) }));
+ builder.end_block( ::MIR::Terminator::make_If({ builder.get_if_cond(), succ_bb, fail_bb }) );
builder.set_cur_block(succ_bb);
),
(ValueRange,
- TODO(sp, "Simple match over primitive - " << ty << " - ValueRange");
+ auto succ_bb = builder.new_bb_unlinked();
+ auto test_bb_2 = builder.new_bb_unlinked();
+
+ auto test_lt_val = ::MIR::Param(::MIR::Constant::make_Uint({ re.first.as_Uint().v, te }));
+ auto test_gt_val = ::MIR::Param(::MIR::Constant::make_Uint({ re.last.as_Uint().v, te }));
+
+ // IF `val` < `first` : fail_bb
+ auto cmp_lt_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ ::MIR::Param(val.clone()), ::MIR::eBinOp::LT, mv$(test_lt_val) }));
+ builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lt_lval), fail_bb, test_bb_2 }) );
+
+ builder.set_cur_block(test_bb_2);
+
+ // IF `val` > `last` : fail_bb
+ auto cmp_gt_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ ::MIR::Param(val.clone()), ::MIR::eBinOp::GT, mv$(test_gt_val) }));
+ builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_gt_lval), fail_bb, succ_bb }) );
+
+ builder.set_cur_block(succ_bb);
)
)
break;
@@ -1577,7 +2033,23 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
builder.set_cur_block(succ_bb);
),
(ValueRange,
- TODO(sp, "Simple match over primitive - " << ty << " - ValueRange");
+ auto succ_bb = builder.new_bb_unlinked();
+ auto test_bb_2 = builder.new_bb_unlinked();
+
+ auto test_lt_val = ::MIR::Param(::MIR::Constant::make_Int({ re.first.as_Int().v, te }));
+ auto test_gt_val = ::MIR::Param(::MIR::Constant::make_Int({ re.last.as_Int().v, te }));
+
+ // IF `val` < `first` : fail_bb
+ auto cmp_lt_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ ::MIR::Param(val.clone()), ::MIR::eBinOp::LT, mv$(test_lt_val) }));
+ builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lt_lval), fail_bb, test_bb_2 }) );
+
+ builder.set_cur_block(test_bb_2);
+
+ // IF `val` > `last` : fail_bb
+ auto cmp_gt_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ ::MIR::Param(val.clone()), ::MIR::eBinOp::GT, mv$(test_gt_val) }));
+ builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_gt_lval), fail_bb, succ_bb }) );
+
+ builder.set_cur_block(succ_bb);
)
)
break;
@@ -1617,7 +2089,38 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
break;
case ::HIR::CoreType::F32:
case ::HIR::CoreType::F64:
- TODO(sp, "Simple match over float - " << ty);
+ TU_MATCH_DEF( PatternRule, (rule), (re),
+ (
+ BUG(sp, "PatternRule for float is not Value or ValueRange");
+ ),
+ (Value,
+ auto succ_bb = builder.new_bb_unlinked();
+
+ auto test_val = ::MIR::Param(::MIR::Constant::make_Float({ re.as_Float().v, te }));
+ auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, mv$(test_val) }));
+ builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), succ_bb, fail_bb }) );
+ builder.set_cur_block(succ_bb);
+ ),
+ (ValueRange,
+ auto succ_bb = builder.new_bb_unlinked();
+ auto test_bb_2 = builder.new_bb_unlinked();
+
+ auto test_lt_val = ::MIR::Param(::MIR::Constant::make_Float({ re.first.as_Float().v, te }));
+ auto test_gt_val = ::MIR::Param(::MIR::Constant::make_Float({ re.last.as_Float().v, te }));
+
+ // IF `val` < `first` : fail_bb
+ auto cmp_lt_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ ::MIR::Param(val.clone()), ::MIR::eBinOp::LT, mv$(test_lt_val) }));
+ builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lt_lval), fail_bb, test_bb_2 }) );
+
+ builder.set_cur_block(test_bb_2);
+
+ // IF `val` > `last` : fail_bb
+ auto cmp_gt_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ ::MIR::Param(val.clone()), ::MIR::eBinOp::GT, mv$(test_gt_val) }));
+ builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_gt_lval), fail_bb, succ_bb }) );
+
+ builder.set_cur_block(succ_bb);
+ )
+ )
break;
case ::HIR::CoreType::Str: {
ASSERT_BUG(sp, rule.is_Value() && rule.as_Value().is_StaticString(), "");
@@ -1658,7 +2161,11 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
TODO(sp, "Match over Union");
),
(Enum,
- auto monomorph = [&](const auto& ty) { return monomorphise_type(sp, pbe->m_params, te.path.m_data.as_Generic().m_params, ty); };
+ auto monomorph = [&](const auto& ty) {
+ auto rv = monomorphise_type(sp, pbe->m_params, te.path.m_data.as_Generic().m_params, ty);
+ builder.resolve().expand_associated_types(sp, rv);
+ return rv;
+ };
ASSERT_BUG(sp, rule.is_Variant(), "Rule for enum isn't Any or Variant");
const auto& re = rule.as_Variant();
unsigned int var_idx = re.idx;
@@ -1696,7 +2203,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
// Recurse with the new ruleset
MIR_LowerHIR_Match_Simple__GeneratePattern(builder, sp,
re.sub_rules.data(), re.sub_rules.size(),
- fake_tup, ::MIR::LValue::make_Downcast({ box$(val.clone()), var_idx }), rule.field_path.size(),
+ fake_tup, ::MIR::LValue::make_Downcast({ box$(val.clone()), var_idx }), rule.field_path.size()+1,
fail_bb
);
),
@@ -1713,7 +2220,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
// Recurse with the new ruleset
MIR_LowerHIR_Match_Simple__GeneratePattern(builder, sp,
re.sub_rules.data(), re.sub_rules.size(),
- fake_tup, ::MIR::LValue::make_Downcast({ box$(val.clone()), var_idx }), rule.field_path.size(),
+ fake_tup, ::MIR::LValue::make_Downcast({ box$(val.clone()), var_idx }), rule.field_path.size()+1,
fail_bb
);
)
@@ -1733,7 +2240,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
),
(Array,
TODO(sp, "Match directly on array?");
- /*
+ #if 0
unsigned int total = 0;
for( unsigned int i = 0; i < te.size_val; i ++ ) {
unsigned int cnt = MIR_LowerHIR_Match_Simple__GeneratePattern(
@@ -1748,7 +2255,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
if( num_rules == 0 )
return total;
}
- */
+ #endif
),
(Slice,
ASSERT_BUG(sp, rule.is_Slice() || rule.is_SplitSlice() || (rule.is_Value() && rule.as_Value().is_Bytes()), "Can only match slice with Bytes or Slice rules - " << rule);
@@ -1833,1963 +2340,960 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
return 0;
}
-// --------------------------------------------------------------------
-// Decision Tree
-// --------------------------------------------------------------------
+// --
+// Match v2 Algo - Grouped rules
+// --
+
-// ## Create descision tree in-memory based off the ruleset
-// > Tree contains an lvalue and a set of possibilities (PatternRule) connected to another tree or to a branch index
-struct DecisionTreeNode
+class t_rules_subset
{
- TAGGED_UNION( Branch, Unset,
- (Unset, struct{}),
- (Subtree, ::std::unique_ptr<DecisionTreeNode>),
- (Terminal, unsigned int)
- );
-
- template<typename T>
- struct Range
+ ::std::vector<const ::std::vector<PatternRule>*> rule_sets;
+ bool is_arm_indexes;
+ ::std::vector<size_t> arm_idxes;
+public:
+ t_rules_subset(size_t exp, bool is_arm_indexes):
+ is_arm_indexes(is_arm_indexes)
{
- T start;
- T end;
-
- // `x` starts after this range ends
- bool operator<(const Range<T>& x) const {
- return (end < x.start);
- }
- // `x` falls above the end of this range
- bool operator<(const T& x) const {
- return (end < x);
- }
-
- // `x` ends before this starts, or overlaps
- bool operator>=(const Range<T>& x) const {
- return start > x.end || ovelaps(x);
- }
- // `x` is before or within this range
- bool operator>=(const T& x) const {
- return start > x || contains(x);
- }
+ rule_sets.reserve(exp);
+ arm_idxes.reserve(exp);
+ }
- bool operator>(const Range<T>& x) const {
- return (start > x.end);
- }
- bool operator>(const T& x) const {
- return (start > x);
- }
+ size_t size() const {
+ return rule_sets.size();
+ }
+ const ::std::vector<PatternRule>& operator[](size_t n) const {
+ return *rule_sets[n];
+ }
+ bool is_arm() const { return is_arm_indexes; }
+ ::std::pair<size_t,size_t> arm_idx(size_t n) const {
+ assert(is_arm_indexes);
+ auto v = arm_idxes.at(n);
+ return ::std::make_pair(v & 0xFFF, v >> 12);
+ }
+ ::MIR::BasicBlockId bb_idx(size_t n) const {
+ assert(!is_arm_indexes);
+ return arm_idxes.at(n);
+ }
- bool operator==(const Range<T>& x) const {
- return start == x.start && end == x.end;
- }
- bool operator!=(const Range<T>& x) const {
- return start != x.start || end != x.end;
- }
+ void sub_sort(size_t ofs, size_t start, size_t n)
+ {
+ ::std::vector<size_t> v;
+ for(size_t i = 0; i < n; i++)
+ v.push_back(start + i);
+ // Sort rules based on just the value (ignore inner rules)
+ ::std::stable_sort( v.begin(), v.end(), [&](auto a, auto b){ return ord_rule_compatible( (*rule_sets[a])[ofs], (*rule_sets[b])[ofs]) == OrdLess; } );
- bool contains(const T& x) const {
- return (start <= x && x <= end);
+ // Reorder contents to above sorting
+ {
+ decltype(this->rule_sets) tmp;
+ for(auto i : v)
+ tmp.push_back(rule_sets[i]);
+ ::std::copy( tmp.begin(), tmp.end(), rule_sets.begin() + start );
}
- bool overlaps(const Range<T>& x) const {
- return (x.start <= start && start <= x.end) || (x.start <= end && end <= x.end);
+ {
+ decltype(this->arm_idxes) tmp;
+ for(auto i : v)
+ tmp.push_back(arm_idxes[i]);
+ ::std::copy( tmp.begin(), tmp.end(), arm_idxes.begin() + start );
}
+ }
- friend ::std::ostream& operator<<(::std::ostream& os, const Range<T>& x) {
- if( x.start == x.end ) {
- return os << x.start;
- }
- else {
- return os << x.start << " ... " << x.end;
- }
+ t_rules_subset sub_slice(size_t ofs, size_t n)
+ {
+ t_rules_subset rv { n, this->is_arm_indexes };
+ rv.rule_sets.reserve(n);
+ for(size_t i = 0; i < n; i++)
+ {
+ rv.rule_sets.push_back( this->rule_sets[ofs+i] );
+ rv.arm_idxes.push_back( this->arm_idxes[ofs+i] );
}
- };
-
- TAGGED_UNION( Values, Unset,
- (Unset, struct {}),
- (Bool, struct { Branch false_branch, true_branch; }),
- (Variant, ::std::vector< ::std::pair<unsigned int, Branch> >),
- (Unsigned, ::std::vector< ::std::pair< Range<uint64_t>, Branch> >),
- (Signed, ::std::vector< ::std::pair< Range<int64_t>, Branch> >),
- (Float, ::std::vector< ::std::pair< Range<double>, Branch> >),
- (String, ::std::vector< ::std::pair< ::std::string, Branch> >),
- (Slice, struct {
- ::std::vector< ::std::pair< unsigned int, Branch> > fixed_arms;
- //::std::vector< ::std::pair< unsigned int, Branch> > variable_arms;
- })
- );
-
- // TODO: Arm specialisation?
- field_path_t m_field_path;
- Values m_branches;
- Branch m_default;
-
- DecisionTreeNode( field_path_t field_path ):
- // TODO: This is commented out fo a reason, but I don't know why.
- //m_field_path( mv$(field_path) ),
- m_branches(),
- m_default()
- {}
-
- static Branch clone(const Branch& b);
- static Values clone(const Values& x);
- DecisionTreeNode clone() const;
-
- void populate_tree_from_rule(const Span& sp, unsigned int arm_index, const PatternRule* first_rule, unsigned int rule_count) {
- populate_tree_from_rule(sp, first_rule, rule_count, [sp,arm_index](Branch& branch){
- TU_MATCHA( (branch), (e),
- (Unset,
- // Good
- ),
- (Subtree,
- if( e->m_branches.is_Unset() && e->m_default.is_Unset() ) {
- // Good.
- }
- else {
- BUG(sp, "Duplicate terminal - branch="<<branch);
- }
- ),
- (Terminal,
- // TODO: This is ok if it's due to overlapping rules (e.g. ranges)
- //BUG(sp, "Duplicate terminal - Existing goes to arm " << e << ", new goes to arm " << arm_index );
- )
- )
- branch = Branch::make_Terminal(arm_index);
- });
+ return rv;
}
- // `and_then` - Closure called after processing the final rule
- void populate_tree_from_rule(const Span& sp, const PatternRule* first_rule, unsigned int rule_count, ::std::function<void(Branch&)> and_then);
-
- /// Simplifies the tree by eliminating nodes that don't make a decision
- void simplify();
- /// Propagate the m_default arm's contents to value arms, and vice-versa
- void propagate_default();
- /// HELPER: Unfies the rules from the provided branch with this node
- void unify_from(const Branch& b);
-
- ::MIR::LValue get_field(const ::MIR::LValue& base, unsigned int base_depth) const {
- ::MIR::LValue cur = base.clone();
- for(unsigned int i = base_depth; i < m_field_path.size(); i ++ ) {
- const auto idx = m_field_path.data[i];
- if( idx == FIELD_DEREF ) {
- cur = ::MIR::LValue::make_Deref({ box$(cur) });
+ void push_arm(const ::std::vector<PatternRule>& x, size_t arm_idx, size_t pat_idx)
+ {
+ assert(is_arm_indexes);
+ rule_sets.push_back(&x);
+ assert(arm_idx <= 0xFFF);
+ assert(pat_idx <= 0xFFF);
+ arm_idxes.push_back(arm_idx | (pat_idx << 12));
+ }
+ void push_bb(const ::std::vector<PatternRule>& x, ::MIR::BasicBlockId bb)
+ {
+ assert(!is_arm_indexes);
+ rule_sets.push_back(&x);
+ arm_idxes.push_back(bb);
+ }
+
+ friend ::std::ostream& operator<<(::std::ostream& os, const t_rules_subset& x) {
+ os << "t_rules_subset{";
+ for(size_t i = 0; i < x.rule_sets.size(); i ++)
+ {
+ if(i != 0)
+ os << ", ";
+ os << "[";
+ if(x.is_arm_indexes)
+ {
+ os << (x.arm_idxes[i] & 0xFFF) << "," << (x.arm_idxes[i] >> 12);
}
- else {
- cur = ::MIR::LValue::make_Field({ box$(cur), idx });
+ else
+ {
+ os << "bb" << x.arm_idxes[i];
}
+ os << "]";
+ os << ": " << *x.rule_sets[i];
}
- return cur;
+ os << "}";
+ return os;
}
-
- friend ::std::ostream& operator<<(::std::ostream& os, const Branch& x);
- friend ::std::ostream& operator<<(::std::ostream& os, const DecisionTreeNode& x);
};
-struct DecisionTreeGen
+class MatchGenGrouped
{
+ const Span& sp;
MirBuilder& m_builder;
- const ::std::vector< ::MIR::BasicBlockId>& m_rule_blocks;
-
- DecisionTreeGen(MirBuilder& builder, const ::std::vector< ::MIR::BasicBlockId >& rule_blocks):
- m_builder( builder ),
- m_rule_blocks( rule_blocks )
- {}
-
- ::MIR::BasicBlockId get_block_for_rule(unsigned int rule_index) {
- return m_rule_blocks.at( rule_index );
- }
-
- void generate_tree_code(const Span& sp, const DecisionTreeNode& node, const ::HIR::TypeRef& ty, const ::MIR::LValue& val) {
- generate_tree_code(sp, node, ty, 0, val, [&](const auto& n){
- DEBUG("node = " << n);
- // - Recurse on this method
- this->generate_tree_code(sp, n, ty, val);
- });
- }
- void generate_tree_code(
- const Span& sp,
- const DecisionTreeNode& node,
- const ::HIR::TypeRef& ty, unsigned int path_ofs, const ::MIR::LValue& base_val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
-
- void generate_branch(const DecisionTreeNode::Branch& branch, ::std::function<void(const DecisionTreeNode&)> cb);
-
- // HELPER
- ::MIR::LValue push_compare(const Span& sp, ::MIR::LValue left, ::MIR::eBinOp op, ::MIR::Param right);
-
- void generate_branches_Signed(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Signed& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
- void generate_branches_Unsigned(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Unsigned& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
- void generate_branches_Float(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Float& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
- void generate_branches_Char(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Unsigned& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
- void generate_branches_Bool(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Bool& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
- void generate_branches_Borrow_str(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_String& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
-
- void generate_branches_Enum(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Variant& branches,
- const field_path_t& field_path, // used to know when to stop handling sub-nodes
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
- void generate_branches_Slice(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Slice& branches,
- const field_path_t& field_path,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
- void generate_tree_code__enum(
- const Span& sp,
- const DecisionTreeNode& node, const ::HIR::TypeRef& fake_ty, const ::MIR::LValue& val,
- const field_path_t& path_prefix,
- ::std::function<void(const DecisionTreeNode&)> and_then
- );
-};
-
-void MIR_LowerHIR_Match_DecisionTree( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNode_Match& node, ::MIR::LValue match_val, t_arm_rules arm_rules, ::std::vector<ArmCode> arms_code, ::MIR::BasicBlockId first_cmp_block )
-{
- TRACE_FUNCTION;
-
- // XXX XXX XXX: The current codegen (below) will generate incorrect code if ordering matters.
- // ```
- // match ("foo", "bar")
- // {
- // (_, "bar") => {}, // Expected
- // ("foo", _) => {}, // Actual
- // _ => {},
- // }
- // ```
-
- // TODO: Sort the columns in `arm_rules` to ensure that the most specific rule is parsed first.
- // - Ordering within a pattern doesn't matter, only the order of arms matters.
- // - This sort could be designed such that the above case would match correctly?
-
- DEBUG("- Generating rule bindings");
- ::std::vector< ::MIR::BasicBlockId> rule_blocks;
- for(const auto& rule : arm_rules)
+ const ::HIR::TypeRef& m_top_ty;
+ const ::MIR::LValue& m_top_val;
+ const ::std::vector<ArmCode>& m_arms_code;
+
+ size_t m_field_path_ofs;
+public:
+ MatchGenGrouped(MirBuilder& builder, const Span& sp, const ::HIR::TypeRef& top_ty, const ::MIR::LValue& top_val, const ::std::vector<ArmCode>& arms_code, size_t field_path_ofs):
+ sp(sp),
+ m_builder(builder),
+ m_top_ty(top_ty),
+ m_top_val(top_val),
+ m_arms_code(arms_code),
+ m_field_path_ofs(field_path_ofs)
{
- const auto& arm_code = arms_code[rule.arm_idx];
- ASSERT_BUG(node.span(), !arm_code.has_condition, "Decision tree doesn't (yet) support conditionals");
-
- assert( rule.pat_idx < arm_code.destructures.size() );
- // Set the target for when a rule succeeds to the destructuring code for this rule
- rule_blocks.push_back( arm_code.destructures[rule.pat_idx] );
- // - Tie the end of that block to the code block for this arm
- builder.set_cur_block( rule_blocks.back() );
- builder.end_block( ::MIR::Terminator::make_Goto(arm_code.code) );
}
+ void gen_for_slice(t_rules_subset rules, size_t ofs, ::MIR::BasicBlockId default_arm);
+ void gen_dispatch(const ::std::vector<t_rules_subset>& rules, size_t ofs, const ::std::vector<::MIR::BasicBlockId>& arm_targets, ::MIR::BasicBlockId def_blk);
+ void gen_dispatch__primitive(::HIR::TypeRef ty, ::MIR::LValue val, const ::std::vector<t_rules_subset>& rules, size_t ofs, const ::std::vector<::MIR::BasicBlockId>& arm_targets, ::MIR::BasicBlockId def_blk);
+ void gen_dispatch__enum(::HIR::TypeRef ty, ::MIR::LValue val, const ::std::vector<t_rules_subset>& rules, size_t ofs, const ::std::vector<::MIR::BasicBlockId>& arm_targets, ::MIR::BasicBlockId def_blk);
+ void gen_dispatch__slice(::HIR::TypeRef ty, ::MIR::LValue val, const ::std::vector<t_rules_subset>& rules, size_t ofs, const ::std::vector<::MIR::BasicBlockId>& arm_targets, ::MIR::BasicBlockId def_blk);
- // - Build tree by running each arm's pattern across it
- DEBUG("- Building decision tree");
- DecisionTreeNode root_node({});
- unsigned int rule_idx = 0;
- for( const auto& arm_rule : arm_rules )
- {
- auto arm_idx = arm_rule.arm_idx;
- DEBUG("(" << arm_idx << ", " << arm_rule.pat_idx << "): " << arm_rule.m_rules);
- root_node.populate_tree_from_rule( node.m_arms[arm_idx].m_code->span(), rule_idx, arm_rule.m_rules.data(), arm_rule.m_rules.size() );
- rule_idx += 1;
- }
- DEBUG("root_node = " << root_node);
- root_node.simplify();
- DEBUG("root_node = " << root_node);
- root_node.propagate_default();
- DEBUG("root_node = " << root_node);
- // TODO: Pretty print `root_node`
-
- // - Convert the above decision tree into MIR
- DEBUG("- Emitting decision tree");
- DecisionTreeGen gen { builder, rule_blocks };
- builder.set_cur_block( first_cmp_block );
- gen.generate_tree_code( node.span(), root_node, node.m_value->m_res_type, mv$(match_val) );
- ASSERT_BUG(node.span(), !builder.block_active(), "Decision tree didn't terminate the final block");
-}
+ void gen_dispatch_range(const field_path_t& field_path, const ::MIR::Constant& first, const ::MIR::Constant& last, ::MIR::BasicBlockId def_blk);
+ void gen_dispatch_splitslice(const field_path_t& field_path, const PatternRule::Data_SplitSlice& e, ::MIR::BasicBlockId def_blk);
-#if 0
-DecisionTreeNode MIR_LowerHIR_Match_DecisionTree__MakeTree(const Span& sp, t_arm_rules& arm_rules)
-{
- ::std::vector<unsigned int> indexes;
- ::std::vector< slice<PatternRule> > rules;
- for(unsigned i = 0; i < arm_rules.size(); i ++)
+ ::MIR::LValue push_compare(::MIR::LValue left, ::MIR::eBinOp op, ::MIR::Param right)
{
- rules.push_back( arm_rules[i].m_rules );
- indexes.push_back(i);
+ return m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool,
+ ::MIR::RValue::make_BinOp({ mv$(left), op, mv$(right) })
+ );
}
+};
- return MIR_LowerHIR_Match_DecisionTree__MakeTree_Node(sp, indexes, rules);
-}
-DecisionTreeNode MIR_LowerHIR_Match_DecisionTree__MakeTree_Node(const Span& sp, slice<unsigned int> arm_indexes, slice< slice<PaternRule>> arm_rules)
-{
- assert( arm_indexes.size() == arm_rules.size() );
- assert( arm_rules.size() > 1 );
- assert( arm_rules[0].size() > 0 );
-
- // 1. Sort list (should it already be sorted?)
- for(const auto& rules : arm_rules)
+namespace {
+ void push_flat_rules(::std::vector<PatternRule>& out_rules, PatternRule rule)
{
- ASSERT_BUG(sp, rules.size() != arm_rules[0].size(), "");
+ TU_MATCHA( (rule), (e),
+ (Variant,
+ auto sub_rules = mv$(e.sub_rules);
+ out_rules.push_back( mv$(rule) );
+ for(auto& sr : sub_rules)
+ push_flat_rules(out_rules, mv$(sr));
+ ),
+ (Slice,
+ auto sub_rules = mv$(e.sub_rules);
+ out_rules.push_back( mv$(rule) );
+ for(auto& sr : sub_rules)
+ push_flat_rules(out_rules, mv$(sr));
+ ),
+ (SplitSlice,
+ auto leading = mv$(e.leading);
+ auto trailing = mv$(e.trailing);
+ out_rules.push_back( mv$(rule) );
+ for(auto& sr : leading)
+ push_flat_rules(out_rules, mv$(sr));
+ // TODO: the trailing rules need a special path format.
+ ASSERT_BUG(Span(), trailing.size() == 0, "TODO: Handle SplitSlice with trailing");
+ for(auto& sr : trailing)
+ push_flat_rules(out_rules, mv$(sr));
+ ),
+ (Bool,
+ out_rules.push_back( mv$(rule) );
+ ),
+ (Value,
+ out_rules.push_back( mv$(rule) );
+ ),
+ (ValueRange,
+ out_rules.push_back( mv$(rule) );
+ ),
+ (Any,
+ out_rules.push_back( mv$(rule) );
+ )
+ )
}
-
- // 2. Detect all arms being `_` and move on to the next condition
- while( ::std::all_of(arm_rules.begin(), arm_rules.end(), [](const auto& r){ return r.m_rules[0].is_Any(); }) )
+ t_arm_rules flatten_rules(t_arm_rules rules)
{
- // Delete first rule from all and continue.
- if( arm_rules[0].size() == 1 ) {
- // No rules left?
- BUG(sp, "Duplicate match arms");
- }
-
- for(auto& rules : arm_rules)
+ t_arm_rules rv;
+ rv.reserve(rules.size());
+ for(auto& ruleset : rules)
{
- rules = rules.subslice_from(1);
+ ::std::vector<PatternRule> pattern_rules;
+ for( auto& r : ruleset.m_rules )
+ {
+ push_flat_rules(pattern_rules, mv$(r));
+ }
+ rv.push_back(PatternRuleset { ruleset.arm_idx, ruleset.pat_idx, mv$(pattern_rules) });
}
+ return rv;
}
+}
- // We have a codition.
- for(const auto& rules : arm_rules)
- {
- ASSERT_BUG(sp, rules[0].is_Any() || rules[0].tag() == arm_rules[0][0].tag(), "Mismatched rules in match");
- }
-
- bool has_any = arm_rules.back()[0].is_Any();
+void MIR_LowerHIR_Match_Grouped(
+ MirBuilder& builder, MirConverter& conv, ::HIR::ExprNode_Match& node, ::MIR::LValue match_val,
+ t_arm_rules arm_rules, ::std::vector<ArmCode> arms_code, ::MIR::BasicBlockId first_cmp_block
+ )
+{
+ // TEMPORARY HACK: Grouped fails in complex matches (e.g. librustc_const_math Int::infer)
+ //MIR_LowerHIR_Match_Simple( builder, conv, node, mv$(match_val), mv$(arm_rules), mv$(arms_code), first_cmp_block );
+ //return;
- // All rules must either be _ or the same type, and can't all be _
- switch( arm_rules[0][0].tag() )
- {
- case PatternRule::TAGDEAD: throw "";
- case PatternRule::TAG_Any: throw "";
+ TRACE_FUNCTION_F("");
- case PatternRule::TAG_Variant:
- break;
- // TODO: Value and ValueRange can appear together.
- // - They also overlap in non-trivial ways.
- }
-}
-#endif
-
-// ----------------------------
-// DecisionTreeNode
-// ----------------------------
-DecisionTreeNode::Branch DecisionTreeNode::clone(const DecisionTreeNode::Branch& b) {
- TU_MATCHA( (b), (e),
- (Unset, return Branch(e); ),
- (Subtree, return Branch(box$( e->clone() )); ),
- (Terminal, return Branch(e); )
- )
- throw "";
-}
-DecisionTreeNode::Values DecisionTreeNode::clone(const DecisionTreeNode::Values& x) {
- TU_MATCHA( (x), (e),
- (Unset, return Values(e); ),
- (Bool,
- return Values::make_Bool({ clone(e.false_branch), clone(e.true_branch) });
- ),
- (Variant,
- Values::Data_Variant rv;
- rv.reserve(e.size());
- for(const auto& v : e)
- rv.push_back( ::std::make_pair(v.first, clone(v.second)) );
- return Values( mv$(rv) );
- ),
- (Unsigned,
- Values::Data_Unsigned rv;
- rv.reserve(e.size());
- for(const auto& v : e)
- rv.push_back( ::std::make_pair(v.first, clone(v.second)) );
- return Values( mv$(rv) );
- ),
- (Signed,
- Values::Data_Signed rv;
- rv.reserve(e.size());
- for(const auto& v : e)
- rv.push_back( ::std::make_pair(v.first, clone(v.second)) );
- return Values( mv$(rv) );
- ),
- (Float,
- Values::Data_Float rv;
- rv.reserve(e.size());
- for(const auto& v : e)
- rv.push_back( ::std::make_pair(v.first, clone(v.second)) );
- return Values( mv$(rv) );
- ),
- (String,
- Values::Data_String rv;
- rv.reserve(e.size());
- for(const auto& v : e)
- rv.push_back( ::std::make_pair(v.first, clone(v.second)) );
- return Values( mv$(rv) );
- ),
- (Slice,
- Values::Data_Slice rv;
- rv.fixed_arms.reserve(e.fixed_arms.size());
- for(const auto& v : e.fixed_arms)
- rv.fixed_arms.push_back( ::std::make_pair(v.first, clone(v.second)) );
- return Values( mv$(rv) );
- )
- )
- throw "";
-}
-DecisionTreeNode DecisionTreeNode::clone() const {
- DecisionTreeNode rv(m_field_path);
- rv.m_field_path = m_field_path;
- rv.m_branches = clone(m_branches);
- rv.m_default = clone(m_default);
- return rv;
-}
+ // Flatten ruleset completely (remove grouping of enum/slice rules)
+ arm_rules = flatten_rules( mv$(arm_rules) );
-// Helpers for `populate_tree_from_rule`
-namespace
-{
- DecisionTreeNode::Branch new_branch_subtree(field_path_t path)
+ // - Create a "slice" of the passed rules, suitable for passing to the recursive part of the algo
+ t_rules_subset rules { arm_rules.size(), /*is_arm_indexes=*/true };
+ for(const auto& r : arm_rules)
{
- return DecisionTreeNode::Branch( box$(DecisionTreeNode( mv$(path) )) );
+ rules.push_arm( r.m_rules, r.arm_idx, r.pat_idx );
}
- // Common code for numerics (Int, Uint, and Float)
- template<typename T>
- static void from_rule_value(
- const Span& sp,
- ::std::vector< ::std::pair< DecisionTreeNode::Range<T>, DecisionTreeNode::Branch> >& be, T ve,
- const char* name, const field_path_t& field_path, ::std::function<void(DecisionTreeNode::Branch&)> and_then
- )
- {
- auto it = ::std::find_if(be.begin(), be.end(), [&](const auto& v){ return v.first.end >= ve; });
- if( it == be.end() || it->first.start > ve ) {
- it = be.insert( it, ::std::make_pair( DecisionTreeNode::Range<T> { ve,ve }, new_branch_subtree(field_path) ) );
- }
- else if( it->first.start == ve && it->first.end == ve ) {
- // Equal, continue and add sub-pat
- }
- else {
- // Collide or overlap!
- TODO(sp, "Value patterns - " << name << " - Overlapping - " << it->first.start << " <= " << ve << " <= " << it->first.end);
- }
- and_then( it->second );
- }
+ auto inst = MatchGenGrouped { builder, node.span(), node.m_value->m_res_type, match_val, arms_code, 0 };
- template<typename T>
- static void from_rule_valuerange(
- const Span& sp,
- ::std::vector< ::std::pair< DecisionTreeNode::Range<T>, DecisionTreeNode::Branch> >& be, T ve_start, T ve_end,
- const char* name, const field_path_t& field_path, ::std::function<void(DecisionTreeNode::Branch&)> and_then
- )
- {
- TRACE_FUNCTION_F("be=[" << FMT_CB(os, for(const auto& i:be) os << i.first <<" , ";) << "], new=" << ve_start << "..." << ve_end);
- ASSERT_BUG(sp, ve_start <= ve_end, "Range pattern with a start after the end - " << ve_start << "..." << ve_end);
+ // NOTE: This block should never be used
+ auto default_arm = builder.new_bb_unlinked();
- if( ve_start == ve_end ) {
- from_rule_value(sp, be, ve_start, name, field_path, and_then);
- return ;
- }
- // - Find the first entry that ends after the new one starts.
- auto it = ::std::find_if(be.begin(), be.end(), [&](const auto& v){ return v.first.end >= ve_start; });
- while(ve_start < ve_end)
- {
- if( it == be.end() ) {
- DEBUG("new = (" << ve_start << "..." << ve_end << "), exist=END");
- it = be.insert( it, ::std::make_pair( DecisionTreeNode::Range<T> { ve_start,ve_end }, new_branch_subtree(field_path) ) );
- and_then(it->second);
- return ;
- }
- DEBUG("new = (" << ve_start << "..." << ve_end << "), exist=" << it->first);
- // If the located entry starts after the end of this range
- if( it->first.start >= ve_end ) {
- DEBUG("- New free");
- it = be.insert( it, ::std::make_pair( DecisionTreeNode::Range<T> { ve_start,ve_end }, new_branch_subtree(field_path) ) );
- and_then(it->second);
- return ;
- }
- // If this range is equal to the existing, just recurse into it
- else if( it->first.start == ve_start && it->first.end == ve_end ) {
- DEBUG("- Equal");
- and_then(it->second);
- return ;
- }
- // If the new range starts before the start of this range, add a new entry before the existing one
- else if( it->first.start > ve_start ) {
- DEBUG("- New head, continue");
- it = be.insert( it, ::std::make_pair( DecisionTreeNode::Range<T> { ve_start,it->first.start-1 }, new_branch_subtree(field_path) ) );
- and_then(it->second);
- ++ it;
- ve_start = it->first.start;
- }
- // If the new range ends before the end of this range, split the existing range and recurse into the first
- else if( it->first.end > ve_end ) {
- DEBUG("- Inner");
- assert(ve_start == it->first.start);
- it = be.insert( it, ::std::make_pair( DecisionTreeNode::Range<T> { ve_start, ve_end }, DecisionTreeNode::clone(it->second) ) );
- and_then(it->second);
- (it+1)->first.start = ve_end+1;
- return ;
- }
- // (else) if the new range ends after the end of this range, apply to the rest of this range and advance
- else {
- DEBUG("- Shared head, continue");
- //assert(it->first.start == ve_start);
- assert((it->first.end) < ve_end);
+ builder.set_cur_block( first_cmp_block );
+ inst.gen_for_slice( mv$(rules), 0, default_arm );
- if( it->first.start != it->first.end )
- and_then(it->second);
- ve_start = it->first.end + 1;
- ++ it;
- }
- }
- }
+ // Make the default infinite loop.
+ // - Preferably, it'd abort.
+ builder.set_cur_block(default_arm);
+ builder.end_block( ::MIR::Terminator::make_Goto(default_arm) );
}
-void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule* first_rule, unsigned int rule_count, ::std::function<void(Branch&)> and_then)
+void MatchGenGrouped::gen_for_slice(t_rules_subset arm_rules, size_t ofs, ::MIR::BasicBlockId default_arm)
{
- assert( rule_count > 0 );
- const auto& rule = *first_rule;
-
- if( m_field_path.size() == 0 ) {
- m_field_path = rule.field_path;
- }
- else {
- ASSERT_BUG(sp, m_field_path == rule.field_path, "Patterns with mismatched field paths - " << m_field_path << " != " << rule.field_path);
- }
+ TRACE_FUNCTION_F("arm_rules=" << arm_rules << ", ofs="<<ofs << ", default_arm=" << default_arm);
+ ASSERT_BUG(sp, arm_rules.size() > 0, "");
- struct GET_BRANCHES_H {
- static void unexpected_type(const Span& sp, const char* exp, const char* have) {
- BUG(sp, "Mismatched rules - have " << exp << ", but have seen " << have);
- }
- };
- #define GET_BRANCHES(fld, var) (fld.is_Unset()\
- ? (fld = Values::make_##var({})).as_##var() \
- : (fld.is_##var() \
- ? fld.as_##var() : (GET_BRANCHES_H::unexpected_type(sp, #var, fld.tag_str()), fld.as_##var()) \
- )\
- )
-
-
- TU_MATCHA( (rule), (e),
- (Any, {
- if( rule_count == 1 )
+ // Quick hack: Skip any layers entirely made up of PatternRule::Any
+ for(;;)
+ {
+ bool is_all_any = true;
+ for(size_t i = 0; i < arm_rules.size() && is_all_any; i ++)
{
- ASSERT_BUG(sp, !m_default.is_Terminal(), "Duplicate terminal rule");
- and_then(m_default);
+ if( arm_rules[i].size() <= ofs )
+ is_all_any = false;
+ else if( ! arm_rules[i][ofs].is_Any() )
+ is_all_any = false;
}
- else
+ if( ! is_all_any )
{
- if( m_default.is_Unset() ) {
- m_default = new_branch_subtree(rule.field_path);
- m_default.as_Subtree()->populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else TU_IFLET( Branch, m_default, Subtree, be,
- be->populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- )
- else {
- // NOTE: All lists processed as part of the same tree should be the same length
- BUG(sp, "Duplicate terminal rule");
- }
- }
- // TODO: Should this also recurse into branches?
- }),
- (Variant, {
- auto& be = GET_BRANCHES(m_branches, Variant);
-
- auto it = ::std::find_if( be.begin(), be.end(), [&](const auto& x){ return x.first >= e.idx; });
- // If this variant isn't yet processed, add a new subtree for it
- if( it == be.end() || it->first != e.idx ) {
- it = be.insert(it, ::std::make_pair(e.idx, new_branch_subtree(rule.field_path)));
- assert( it->second.is_Subtree() );
- }
- else {
- if( it->second.is_Terminal() ) {
- BUG(sp, "Duplicate terminal rule - " << it->second.as_Terminal());
- }
- assert( !it->second.is_Unset() );
- assert( it->second.is_Subtree() );
+ break ;
}
- auto& subtree = *it->second.as_Subtree();
+ ofs ++;
+ DEBUG("Skip to ofs=" << ofs);
+ }
- if( e.sub_rules.size() > 0 && rule_count > 1 )
- {
- subtree.populate_tree_from_rule(sp, e.sub_rules.data(), e.sub_rules.size(), [&](Branch& branch){
- TU_MATCH_DEF(Branch, (branch), (be),
- (
- BUG(sp, "Duplicate terminator");
- ),
- (Unset,
- branch = new_branch_subtree(rule.field_path);
- ),
- (Subtree,
- )
- )
- branch.as_Subtree()->populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- });
- }
- else if( e.sub_rules.size() > 0)
- {
- subtree.populate_tree_from_rule(sp, e.sub_rules.data(), e.sub_rules.size(), and_then);
- }
- else if( rule_count > 1 )
- {
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else
+ // Split current set of rules into groups based on _ patterns
+ for(size_t idx = 0; idx < arm_rules.size(); )
+ {
+ // Completed arms
+ while( idx < arm_rules.size() && arm_rules[idx].size() <= ofs )
{
- and_then(it->second);
- }
- }),
- (Slice,
- auto& be = GET_BRANCHES(m_branches, Slice);
+ auto next = idx+1 == arm_rules.size() ? default_arm : m_builder.new_bb_unlinked();
+ ASSERT_BUG(sp, arm_rules[idx].size() == ofs, "Offset too large for rule - ofs=" << ofs << ", rules=" << arm_rules[idx]);
+ DEBUG(idx << ": Complete");
+ // Emit jump to either arm code, or arm condition
+ if( arm_rules.is_arm() )
+ {
+ auto ai = arm_rules.arm_idx(idx);
+ ASSERT_BUG(sp, m_arms_code.size() > 0, "Bottom-level ruleset with no arm code information");
+ const auto& ac = m_arms_code[ai.first];
- auto it = ::std::find_if( be.fixed_arms.begin(), be.fixed_arms.end(), [&](const auto& x){ return x.first >= e.len; } );
- if( it == be.fixed_arms.end() || it->first != e.len ) {
- it = be.fixed_arms.insert(it, ::std::make_pair(e.len, new_branch_subtree(rule.field_path)));
- }
- else {
- if( it->second.is_Terminal() ) {
- BUG(sp, "Duplicate terminal rule - " << it->second.as_Terminal());
- }
- assert( !it->second.is_Unset() );
- }
- assert( it->second.is_Subtree() );
- auto& subtree = *it->second.as_Subtree();
+ m_builder.end_block( ::MIR::Terminator::make_Goto(ac.destructures[ai.second]) );
+ m_builder.set_cur_block( ac.destructures[ai.second] );
- if( e.sub_rules.size() > 0 && rule_count > 1 )
- {
- subtree.populate_tree_from_rule(sp, e.sub_rules.data(), e.sub_rules.size(), [&](Branch& branch){
- TU_MATCH_DEF(Branch, (branch), (be),
- (
- BUG(sp, "Duplicate terminator");
- ),
- (Unset,
- branch = new_branch_subtree(rule.field_path);
- ),
- (Subtree,
- )
- )
- branch.as_Subtree()->populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- });
- }
- else if( e.sub_rules.size() > 0)
- {
- subtree.populate_tree_from_rule(sp, e.sub_rules.data(), e.sub_rules.size(), and_then);
- }
- else if( rule_count > 1 )
- {
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else
- {
- and_then(it->second);
- }
- ),
- (SplitSlice,
- //auto& be = GET_BRANCHES(m_branches, Slice);
- TODO(sp, "SplitSlice in DTN - " << rule);
- ),
- (Bool,
- auto& be = GET_BRANCHES(m_branches, Bool);
+ if( ac.has_condition )
+ {
+ TODO(sp, "Handle conditionals in Grouped");
+ // TODO: If the condition fails, this should re-try the match on other rules that could have worked.
+ // - For now, conditionals are disabled.
- auto& branch = (e ? be.true_branch : be.false_branch);
- if( branch.is_Unset() ) {
- branch = new_branch_subtree( rule.field_path );
- }
- else if( branch.is_Terminal() ) {
- BUG(sp, "Duplicate terminal rule - " << branch.as_Terminal());
- }
- else {
- // Good.
- }
- if( rule_count > 1 )
- {
- auto& subtree = *branch.as_Subtree();
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else
- {
- and_then(branch);
- }
- ),
- (Value,
- TU_MATCHA( (e), (ve),
- (Int,
- auto& be = GET_BRANCHES(m_branches, Signed);
-
- from_rule_value(sp, be, ve.v, "Signed", rule.field_path,
- [&](auto& branch) {
- if( rule_count > 1 ) {
- assert( branch.as_Subtree() );
- auto& subtree = *branch.as_Subtree();
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else
- {
- and_then(branch);
- }
- });
- ),
- (Uint,
- auto& be = GET_BRANCHES(m_branches, Unsigned);
-
- from_rule_value(sp, be, ve.v, "Unsigned", rule.field_path,
- [&](auto& branch) {
- if( rule_count > 1 ) {
- assert( branch.as_Subtree() );
- auto& subtree = *branch.as_Subtree();
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else
+ // TODO: What if there's multiple patterns on this condition?
+ // - For now, only the first pattern gets edited.
+ // - Maybe clone the blocks used for the condition?
+ m_builder.end_block( ::MIR::Terminator::make_Goto(ac.cond_start) );
+
+ // Check for marking in `ac` that the block has already been terminated, assert that target is `next`
+ if( ai.second == 0 )
{
- and_then(branch);
- }
- });
- ),
- (Float,
- auto& be = GET_BRANCHES(m_branches, Float);
-
- from_rule_value(sp, be, ve.v, "Float", rule.field_path,
- [&](auto& branch) {
- if( rule_count > 1 ) {
- assert( branch.as_Subtree() );
- auto& subtree = *branch.as_Subtree();
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else {
- and_then(branch);
+ if( ac.cond_fail_tgt != 0)
+ {
+ ASSERT_BUG(sp, ac.cond_fail_tgt == next, "Condition fail target already set with mismatching arm, set to bb" << ac.cond_fail_tgt << " cur is bb" << next);
+ }
+ else
+ {
+ ac.cond_fail_tgt = next;
+
+ m_builder.set_cur_block( ac.cond_end );
+ m_builder.end_block( ::MIR::Terminator::make_If({ ac.cond_lval.clone(), ac.code, next }) );
+ }
}
- });
- ),
- (Bool,
- BUG(sp, "Hit Bool in PatternRule::Value - " << e);
- ),
- (Bytes,
- TODO(sp, "Value patterns - Bytes");
- ),
- (StaticString,
- auto& be = GET_BRANCHES(m_branches, String);
- auto it = ::std::find_if(be.begin(), be.end(), [&](const auto& v){ return v.first >= ve; });
- if( it == be.end() || it->first != ve ) {
- it = be.insert( it, ::std::make_pair(ve, new_branch_subtree(rule.field_path) ) );
- }
- auto& branch = it->second;
- if( rule_count > 1 )
- {
- assert( branch.as_Subtree() );
- auto& subtree = *branch.as_Subtree();
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
+ if( next != default_arm )
+ m_builder.set_cur_block(next);
+ }
+ else
+ {
+ m_builder.end_block( ::MIR::Terminator::make_Goto(ac.code) );
+ ASSERT_BUG(sp, idx+1 == arm_rules.size(), "Ended arm with other arms present");
+ }
}
else
{
- and_then(branch);
+ auto bb = arm_rules.bb_idx(idx);
+ m_builder.end_block( ::MIR::Terminator::make_Goto(bb) );
+ while( idx+1 < arm_rules.size() && bb == arm_rules.bb_idx(idx) && arm_rules[idx].size() == ofs )
+ idx ++;
+ ASSERT_BUG(sp, idx+1 == arm_rules.size(), "Ended arm (inner) with other arms present");
}
- ),
- (Const,
- BUG(sp, "Hit Const in PatternRule::Value - " << e);
- ),
- (ItemAddr,
- BUG(sp, "Hit ItemAddr in PatternRule::Value - " << e);
- )
- )
- ),
- (ValueRange,
-
- ASSERT_BUG(sp, e.first.tag() == e.last.tag(), "Constant type mismatch in ValueRange - " << e.first << " and " << e.last);
- TU_MATCHA( (e.first, e.last), (ve_start, ve_end),
- (Int,
- auto& be = GET_BRANCHES(m_branches, Signed);
- from_rule_valuerange(sp, be, ve_start.v, ve_end.v, "Signed", rule.field_path,
- [&](auto& branch) {
- if( rule_count > 1 )
- {
- assert( branch.as_Subtree() );
- auto& subtree = *branch.as_Subtree();
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else
- {
- and_then(branch);
- }
- });
- ),
- (Uint,
- // TODO: Share code between the three numeric groups
- auto& be = GET_BRANCHES(m_branches, Unsigned);
- from_rule_valuerange(sp, be, ve_start.v, ve_end.v, "Unsigned", rule.field_path,
- [&](auto& branch) {
- if( rule_count > 1 )
- {
- assert( branch.as_Subtree() );
- auto& subtree = *branch.as_Subtree();
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else
- {
- and_then(branch);
- }
- });
- ),
- (Float,
- auto& be = GET_BRANCHES(m_branches, Float);
- from_rule_valuerange(sp, be, ve_start.v, ve_end.v, "Float", rule.field_path,
- [&](auto& branch) {
- if( rule_count > 1 )
- {
- assert( branch.as_Subtree() );
- auto& subtree = *branch.as_Subtree();
- subtree.populate_tree_from_rule(sp, first_rule+1, rule_count-1, and_then);
- }
- else
- {
- and_then(branch);
- }
- });
- ),
- (Bool,
- BUG(sp, "Hit Bool in PatternRule::ValueRange - " << e.first);
- ),
- (Bytes,
- TODO(sp, "ValueRange patterns - Bytes");
- ),
- (StaticString,
- ERROR(sp, E0000, "Use of string in value range patter");
- ),
- (Const,
- BUG(sp, "Hit Const in PatternRule::ValueRange - " << e.first);
- ),
- (ItemAddr,
- BUG(sp, "Hit ItemAddr in PatternRule::ValueRange - " << e.first);
- )
- )
- )
- )
-}
-
-void DecisionTreeNode::simplify()
-{
- struct H {
- static void simplify_branch(Branch& b)
- {
- TU_IFLET(Branch, b, Subtree, be,
- be->simplify();
- if( be->m_branches.is_Unset() ) {
- auto v = mv$( be->m_default );
- b = mv$(v);
- }
- )
+ idx ++;
}
- };
- TU_MATCHA( (m_branches), (e),
- (Unset,
- H::simplify_branch(m_default);
- // Replace `this` with `m_default` if `m_default` is a subtree
- // - Fixes the edge case for the top of the tree
- if( m_default.is_Subtree() )
+ // - Value arms
+ auto start = idx;
+ for(; idx < arm_rules.size() ; idx ++)
{
- *this = mv$(*m_default.as_Subtree());
- }
- return ;
- ),
- (Bool,
- H::simplify_branch(e.false_branch);
- H::simplify_branch(e.true_branch);
- ),
- (Variant,
- for(auto& branch : e) {
- H::simplify_branch(branch.second);
- }
- ),
- (Unsigned,
- for(auto& branch : e) {
- H::simplify_branch(branch.second);
- }
- ),
- (Signed,
- for(auto& branch : e) {
- H::simplify_branch(branch.second);
- }
- ),
- (Float,
- for(auto& branch : e) {
- H::simplify_branch(branch.second);
- }
- ),
- (String,
- for(auto& branch : e) {
- H::simplify_branch(branch.second);
- }
- ),
- (Slice,
- for(auto& branch : e.fixed_arms) {
- H::simplify_branch(branch.second);
+ if( arm_rules[idx].size() <= ofs )
+ break;
+ if( arm_rules[idx][ofs].is_Any() )
+ break;
+ if( arm_rules[idx][ofs].is_SplitSlice() )
+ break;
+ // TODO: It would be nice if ValueRange could be combined with Value (if there's no overlap)
+ if( arm_rules[idx][ofs].is_ValueRange() )
+ break;
}
- )
- )
+ auto first_any = idx;
- H::simplify_branch(m_default);
-}
+ // Generate dispatch based on the above list
+ // - If there's value ranges they need special handling
+ // - Can sort arms within this group (ordering doesn't matter, as long as ranges are handled)
+ // - Sort must be stable.
-void DecisionTreeNode::propagate_default()
-{
- TRACE_FUNCTION_FR(*this, *this);
- struct H {
- static void handle_branch(Branch& b, const Branch& def) {
- TU_IFLET(Branch, b, Subtree, be,
- be->propagate_default();
- if( !def.is_Unset() )
+ if( start < first_any )
+ {
+ DEBUG(start << "+" << (first_any-start) << ": Values");
+ bool has_default = (first_any < arm_rules.size());
+ auto next = (has_default ? m_builder.new_bb_unlinked() : default_arm);
+
+ // Sort rules before getting compatible runs
+ // TODO: Is this a valid operation?
+ arm_rules.sub_sort(ofs, start, first_any - start);
+
+ // Create list of compatible arm slices (runs with the same selector value)
+ ::std::vector<t_rules_subset> slices;
+ auto cur_test = start;
+ for(auto i = start; i < first_any; i ++)
+ {
+ // Just check if the decision value differs (don't check nested rules)
+ if( ! rule_compatible(arm_rules[i][ofs], arm_rules[cur_test][ofs]) )
{
- DEBUG("Unify " << *be << " with " << def);
- be->unify_from(def);
- be->propagate_default();
+ slices.push_back( arm_rules.sub_slice(cur_test, i - cur_test) );
+ cur_test = i;
}
- )
- }
- };
+ }
+ slices.push_back( arm_rules.sub_slice(cur_test, first_any - cur_test) );
+ DEBUG("- " << slices.size() << " groupings");
+ ::std::vector<::MIR::BasicBlockId> arm_blocks;
+ arm_blocks.reserve( slices.size() );
- TU_MATCHA( (m_branches), (e),
- (Unset,
- ),
- (Bool,
- DEBUG("- false");
- H::handle_branch(e.false_branch, m_default);
- DEBUG("- true");
- H::handle_branch(e.true_branch, m_default);
- ),
- (Variant,
- for(auto& branch : e) {
- DEBUG("- V " << branch.first);
- H::handle_branch(branch.second, m_default);
- }
- ),
- (Unsigned,
- for(auto& branch : e) {
- DEBUG("- U " << branch.first);
- H::handle_branch(branch.second, m_default);
- }
- ),
- (Signed,
- for(auto& branch : e) {
- DEBUG("- S " << branch.first);
- H::handle_branch(branch.second, m_default);
- }
- ),
- (Float,
- for(auto& branch : e) {
- DEBUG("- " << branch.first);
- H::handle_branch(branch.second, m_default);
- }
- ),
- (String,
- for(auto& branch : e) {
- DEBUG("- '" << branch.first << "'");
- H::handle_branch(branch.second, m_default);
- }
- ),
- (Slice,
- for(auto& branch : e.fixed_arms) {
- DEBUG("- [_;" << branch.first << "]");
- H::handle_branch(branch.second, m_default);
- }
- )
- )
- DEBUG("- default");
- TU_IFLET(Branch, m_default, Subtree, be,
- be->propagate_default();
-
- if( be->m_default.is_Unset() ) {
- // Propagate default from value branches
- TU_MATCHA( (m_branches), (e),
- (Unset,
- ),
- (Bool,
- be->unify_from(e.false_branch);
- be->unify_from(e.true_branch);
- ),
- (Variant,
- for(auto& branch : e) {
- be->unify_from(branch.second);
- }
- ),
- (Unsigned,
- for(auto& branch : e) {
- be->unify_from(branch.second);
- }
- ),
- (Signed,
- for(auto& branch : e) {
- be->unify_from(branch.second);
- }
- ),
- (Float,
- for(auto& branch : e) {
- be->unify_from(branch.second);
- }
- ),
- (String,
- for(auto& branch : e) {
- be->unify_from(branch.second);
- }
- ),
- (Slice,
- for(auto& branch : e.fixed_arms) {
- be->unify_from(branch.second);
- }
- )
- )
- }
- )
-}
+ auto cur_blk = m_builder.pause_cur_block();
+ // > Stable sort list
+ ::std::sort( slices.begin(), slices.end(), [&](const auto& a, const auto& b){ return a[0][ofs] < b[0][ofs]; } );
+ // TODO: Should this do a stable sort of inner patterns too?
+ // - A sort of inner patterns such that `_` (and range?) patterns don't change position.
-namespace {
- static void unify_branch(DecisionTreeNode::Branch& dst, const DecisionTreeNode::Branch& src) {
- if( dst.is_Unset() ) {
- dst = DecisionTreeNode::clone(src);
- }
- else if( dst.is_Subtree() ) {
- dst.as_Subtree()->unify_from(src);
- }
- else {
- // Terminal, no unify
- }
- }
+ // > Get type of match, generate dispatch list.
+ for(size_t i = 0; i < slices.size(); i ++)
+ {
+ auto cur_block = m_builder.new_bb_unlinked();
+ m_builder.set_cur_block(cur_block);
- template<typename T>
- void unify_from_vals_range(::std::vector< ::std::pair<T, DecisionTreeNode::Branch>>& dst, const ::std::vector< ::std::pair<T, DecisionTreeNode::Branch>>& src)
- {
- for(const auto& srcv : src)
- {
- // Find the first entry with an end greater than or equal to the start of this entry
- auto it = ::std::find_if( dst.begin(), dst.end(), [&](const auto& x){ return x.first.end >= srcv.first.start; });
- // Not found? Insert a new branch
- if( it == dst.end() ) {
- it = dst.insert(it, ::std::make_pair(srcv.first, DecisionTreeNode::clone(srcv.second)));
- }
- // If the found entry doesn't overlap (the start of `*it` is after the end of `srcv`)
- else if( it->first.start > srcv.first.end ) {
- it = dst.insert(it, ::std::make_pair(srcv.first, DecisionTreeNode::clone(srcv.second)));
- }
- else if( it->first == srcv.first ) {
- unify_branch( it->second, srcv.second );
- }
- else {
- // NOTE: Overlapping doesn't get handled here
- }
- }
- }
+ for(size_t j = 0; j < slices[i].size(); j ++)
+ {
+ if(j > 0)
+ ASSERT_BUG(sp, slices[i][0][ofs] == slices[i][j][ofs], "Mismatched rules - " << slices[i][0][ofs] << " and " << slices[i][j][ofs]);
+ arm_blocks.push_back(cur_block);
+ }
- template<typename T>
- void unify_from_vals_pt(::std::vector< ::std::pair<T, DecisionTreeNode::Branch>>& dst, const ::std::vector< ::std::pair<T, DecisionTreeNode::Branch>>& src)
- {
- // Insert items not already present, merge present items
- for(const auto& srcv : src)
- {
- auto it = ::std::find_if( dst.begin(), dst.end(), [&](const auto& x){ return x.first >= srcv.first; });
- // Not found? Insert a new branch
- if( it == dst.end() || it->first != srcv.first ) {
- it = dst.insert(it, ::std::make_pair(srcv.first, DecisionTreeNode::clone(srcv.second)));
- }
- else {
- unify_branch( it->second, srcv.second );
+ this->gen_for_slice(slices[i], ofs+1, next);
}
- }
- }
-}
-void DecisionTreeNode::unify_from(const Branch& b)
-{
- TRACE_FUNCTION_FR(*this << " with " << b, *this);
+ m_builder.set_cur_block(cur_blk);
- assert( b.is_Terminal() || b.is_Subtree() );
+ // Generate decision code
+ this->gen_dispatch(slices, ofs, arm_blocks, next);
- if( m_default.is_Unset() ) {
- if( b.is_Terminal() ) {
- m_default = clone(b);
- }
- else {
- m_default = clone(b.as_Subtree()->m_default);
+ if(has_default)
+ {
+ m_builder.set_cur_block(next);
+ }
}
- }
- if( b.is_Subtree() && b.as_Subtree()->m_branches.tag() != m_branches.tag() ) {
- // Is this a bug, or expected (and don't unify in?)
- DEBUG("TODO - Unify mismatched arms? - " << b.as_Subtree()->m_branches.tag_str() << " and " << m_branches.tag_str());
- return ;
- }
- bool should_unify_subtree = b.is_Subtree() && this->m_field_path == b.as_Subtree()->m_field_path;
- //if( b.is_Subtree() ) {
- // ASSERT_BUG(Span(), this->m_field_path == b.as_Subtree()->m_field_path, "Unifiying DTNs with mismatched paths - " << this->m_field_path << " != " << b.as_Subtree()->m_field_path);
- //}
+ // Collate matching blocks at `first_any`
+ assert(first_any == idx);
+ if( first_any < arm_rules.size() && arm_rules[idx].size() > ofs )
+ {
+ // Collate all equal rules
+ while(idx < arm_rules.size() && arm_rules[idx][ofs] == arm_rules[first_any][ofs])
+ idx ++;
+ DEBUG(first_any << "-" << idx << ": Multi-match");
- TU_MATCHA( (m_branches), (dst),
- (Unset,
- if( b.is_Subtree() ) {
- assert( b.as_Subtree()->m_branches.is_Unset() );
- }
- else {
- // Huh? Terminal matching against an unset branch?
- }
- ),
- (Bool,
- auto* src = (b.is_Subtree() ? &b.as_Subtree()->m_branches.as_Bool() : nullptr);
+ bool has_next = idx < arm_rules.size();
+ auto next = (has_next ? m_builder.new_bb_unlinked() : default_arm);
- unify_branch( dst.false_branch, (src ? src->false_branch : b) );
- unify_branch( dst.true_branch , (src ? src->true_branch : b) );
- ),
- (Variant,
- if( should_unify_subtree ) {
- auto& sb = b.as_Subtree()->m_branches;
- ASSERT_BUG(Span(), sb.is_Variant(), "Unifying Variant with " << sb.tag_str());
- unify_from_vals_pt(dst, sb.as_Variant());
- }
- else {
- // Unify all with terminal branch
- for(auto& dstv : dst)
+ const auto& rule = arm_rules[first_any][ofs];
+ if(const auto* e = rule.opt_ValueRange())
{
- unify_branch(dstv.second, b);
+ // Generate branch based on range
+ this->gen_dispatch_range(arm_rules[first_any][ofs].field_path, e->first, e->last, next);
}
- }
- ),
- (Unsigned,
- if( should_unify_subtree ) {
- auto& sb = b.as_Subtree()->m_branches;
- ASSERT_BUG(Span(), sb.is_Unsigned(), "Unifying Unsigned with " << sb.tag_str());
- unify_from_vals_range(dst, sb.as_Unsigned());
- }
- else {
- for(auto& dstv : dst)
+ else if(const auto* e = rule.opt_SplitSlice())
{
- unify_branch(dstv.second, b);
+ // Generate branch based on slice length being at least required.
+ this->gen_dispatch_splitslice(rule.field_path, *e, next);
}
- }
- ),
- (Signed,
- if( should_unify_subtree ) {
- auto& sb = b.as_Subtree()->m_branches;
- ASSERT_BUG(Span(), sb.is_Signed(), "Unifying Signed with " << sb.tag_str());
- unify_from_vals_range(dst, sb.as_Signed());
- }
- else {
- for(auto& dstv : dst)
+ else
{
- unify_branch(dstv.second, b);
- }
- }
- ),
- (Float,
- if( should_unify_subtree ) {
- auto& sb = b.as_Subtree()->m_branches;
- ASSERT_BUG(Span(), sb.is_Float(), "Unifying Float with " << sb.tag_str());
- unify_from_vals_range(dst, sb.as_Float());
- }
- else {
- for(auto& dstv : dst) {
- unify_branch(dstv.second, b);
- }
- }
- ),
- (String,
- if( should_unify_subtree ) {
- auto& sb = b.as_Subtree()->m_branches;
- ASSERT_BUG(Span(), sb.is_String(), "Unifying String with " << sb.tag_str());
- unify_from_vals_pt(dst, sb.as_String());
- }
- else {
- for(auto& dstv : dst) {
- unify_branch( dstv.second, b );
+ ASSERT_BUG(sp, rule.is_Any(), "Didn't expect non-Any rule here, got " << rule.tag_str() << " " << rule);
}
- }
- ),
- (Slice,
- if( should_unify_subtree ) {
- auto& sb = b.as_Subtree()->m_branches;
- ASSERT_BUG(Span(), sb.is_Slice(), "Unifying Slice with " << sb.tag_str());
- const auto& src = sb.as_Slice();
- unify_from_vals_pt(dst.fixed_arms, src.fixed_arms);
- }
- else {
- for(auto& dstv : dst.fixed_arms) {
- unify_branch( dstv.second, b );
- }
- }
- )
- )
-}
+ // Step deeper into these arms
+ auto slice = arm_rules.sub_slice(first_any, idx - first_any);
+ this->gen_for_slice(mv$(slice), ofs+1, next);
-::std::ostream& operator<<(::std::ostream& os, const DecisionTreeNode::Branch& x) {
- TU_MATCHA( (x), (be),
- (Unset,
- os << "!";
- ),
- (Terminal,
- os << "ARM " << be;
- ),
- (Subtree,
- os << *be;
- )
- )
- return os;
-}
-::std::ostream& operator<<(::std::ostream& os, const DecisionTreeNode& x) {
- os << "DTN [" << x.m_field_path << "] { ";
- TU_MATCHA( (x.m_branches), (e),
- (Unset,
- os << "!, ";
- ),
- (Bool,
- os << "false = " << e.false_branch << ", true = " << e.true_branch << ", ";
- ),
- (Variant,
- os << "V ";
- for(const auto& branch : e) {
- os << branch.first << " = " << branch.second << ", ";
- }
- ),
- (Unsigned,
- os << "U ";
- for(const auto& branch : e) {
- const auto& range = branch.first;
- if( range.start == range.end ) {
- os << range.start;
- }
- else {
- os << range.start << "..." << range.end;
- }
- os << " = " << branch.second << ", ";
- }
- ),
- (Signed,
- os << "S ";
- for(const auto& branch : e) {
- const auto& range = branch.first;
- if( range.start == range.end ) {
- os << range.start;
- }
- else {
- os << range.start << "..." << range.end;
- }
- os << " = " << branch.second << ", ";
- }
- ),
- (Float,
- os << "F ";
- for(const auto& branch : e) {
- const auto& range = branch.first;
- if( range.start == range.end ) {
- os << range.start;
- }
- else {
- os << range.start << "..." << range.end;
+ if(has_next)
+ {
+ m_builder.set_cur_block(next);
}
- os << " = " << branch.second << ", ";
- }
- ),
- (String,
- for(const auto& branch : e) {
- os << "\"" << branch.first << "\"" << " = " << branch.second << ", ";
}
- ),
- (Slice,
- os << "len ";
- for(const auto& branch : e.fixed_arms) {
- os << "=" << branch.first << " = " << branch.second << ", ";
- }
- )
- )
+ }
- os << "* = " << x.m_default;
- os << " }";
- return os;
+ ASSERT_BUG(sp, ! m_builder.block_active(), "Block left active after match group");
}
-
-// ----------------------------
-// DecisionTreeGen
-// ----------------------------
-
-void DecisionTreeGen::generate_tree_code(
- const Span& sp,
- const DecisionTreeNode& node,
- const ::HIR::TypeRef& top_ty, unsigned int field_path_ofs, const ::MIR::LValue& top_val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
+void MatchGenGrouped::gen_dispatch(const ::std::vector<t_rules_subset>& rules, size_t ofs, const ::std::vector<::MIR::BasicBlockId>& arm_targets, ::MIR::BasicBlockId def_blk)
{
- TRACE_FUNCTION_F("top_ty=" << top_ty << ", field_path_ofs=" << field_path_ofs << ", top_val=" << top_val << ", node=" << node);
+ const auto& field_path = rules[0][0][ofs].field_path;
+ TRACE_FUNCTION_F("rules=["<<rules <<"], ofs=" << ofs <<", field_path=" << field_path);
+
+ {
+ size_t n = 0;
+ for(size_t i = 0; i < rules.size(); i++)
+ {
+ for(size_t j = 0; j < rules[i].size(); j++)
+ {
+ ASSERT_BUG(sp, rules[i][j][ofs].field_path == field_path, "Field path mismatch, " << rules[i][j][ofs].field_path << " != " << field_path);
+ n ++;
+ }
+ }
+ ASSERT_BUG(sp, arm_targets.size() == n, "Arm target count mismatch - " << n << " != " << arm_targets.size());
+ }
::MIR::LValue val;
::HIR::TypeRef ty;
-
- get_ty_and_val(sp, m_builder.resolve(), top_ty, top_val, node.m_field_path, field_path_ofs, ty, val);
+ get_ty_and_val(sp, m_builder.resolve(), m_top_ty, m_top_val, field_path, m_field_path_ofs, ty, val);
DEBUG("ty = " << ty << ", val = " << val);
- TU_MATCHA( (ty.m_data), (e),
- (Infer, BUG(sp, "Ivar for in match type"); ),
- (Diverge, BUG(sp, "Diverge in match type"); ),
- (Primitive,
- switch(e)
- {
- case ::HIR::CoreType::Bool:
- ASSERT_BUG(sp, node.m_branches.is_Bool(), "Tree for bool isn't a _Bool - node="<<node);
- this->generate_branches_Bool(sp, node.m_default, node.m_branches.as_Bool(), ty, mv$(val), mv$(and_then));
- break;
- case ::HIR::CoreType::U8:
- case ::HIR::CoreType::U16:
- case ::HIR::CoreType::U32:
- case ::HIR::CoreType::U64:
- case ::HIR::CoreType::U128:
- case ::HIR::CoreType::Usize:
- ASSERT_BUG(sp, node.m_branches.is_Unsigned(), "Tree for unsigned isn't a _Unsigned - node="<<node);
- this->generate_branches_Unsigned(sp, node.m_default, node.m_branches.as_Unsigned(), ty, mv$(val), mv$(and_then));
- break;
- case ::HIR::CoreType::I8:
- case ::HIR::CoreType::I16:
- case ::HIR::CoreType::I32:
- case ::HIR::CoreType::I64:
- case ::HIR::CoreType::I128:
- case ::HIR::CoreType::Isize:
- ASSERT_BUG(sp, node.m_branches.is_Signed(), "Tree for unsigned isn't a _Signed - node="<<node);
- this->generate_branches_Signed(sp, node.m_default, node.m_branches.as_Signed(), ty, mv$(val), mv$(and_then));
- break;
- case ::HIR::CoreType::Char:
- ASSERT_BUG(sp, node.m_branches.is_Unsigned(), "Tree for char isn't a _Unsigned - node="<<node);
- this->generate_branches_Char(sp, node.m_default, node.m_branches.as_Unsigned(), ty, mv$(val), mv$(and_then));
- break;
- case ::HIR::CoreType::Str:
- ASSERT_BUG(sp, node.m_branches.is_String(), "Tree for &str isn't a _String - node="<<node);
- this->generate_branches_Borrow_str(sp, node.m_default, node.m_branches.as_String(), ty, mv$(val), mv$(and_then));
- break;
- case ::HIR::CoreType::F32:
- case ::HIR::CoreType::F64:
- ASSERT_BUG(sp, node.m_branches.is_Float(), "Tree for float isn't a _Float - node="<<node);
- this->generate_branches_Float(sp, node.m_default, node.m_branches.as_Float(), ty, mv$(val), mv$(and_then));
- break;
- default:
- TODO(sp, "Primitive - " << ty);
- break;
- }
+ TU_MATCHA( (ty.m_data), (te),
+ (Infer,
+ BUG(sp, "Hit _ in type - " << ty);
),
- (Tuple,
- BUG(sp, "Decision node on tuple - node=" << node);
+ (Diverge,
+ BUG(sp, "Matching over !");
+ ),
+ (Primitive,
+ this->gen_dispatch__primitive(mv$(ty), mv$(val), rules, ofs, arm_targets, def_blk);
),
(Path,
- // This is either a struct destructure or an enum
- TU_MATCHA( (e.binding), (pbe),
- (Unbound,
- BUG(sp, "Encounterd unbound path - " << e.path);
- ),
- (Opaque,
- and_then(node);
- ),
- (Struct,
- assert(pbe);
- TU_MATCHA( (pbe->m_data), (fields),
- (Unit,
- and_then(node);
+ // Matching over a path can only happen with an enum.
+ // TODO: What about `box` destructures?
+ // - They're handled via hidden derefs
+ if( !te.binding.is_Enum() ) {
+ TU_MATCHA( (te.binding), (pbe),
+ (Unbound,
+ BUG(sp, "Encounterd unbound path - " << te.path);
),
- (Tuple,
- BUG(sp, "Decision node on tuple struct");
+ (Opaque,
+ BUG(sp, "Attempting to match over opaque type - " << ty);
),
- (Named,
- BUG(sp, "Decision node on struct");
+ (Struct,
+ const auto& str_data = pbe->m_data;
+ TU_MATCHA( (str_data), (sd),
+ (Unit,
+ BUG(sp, "Attempting to match over unit type - " << ty);
+ ),
+ (Tuple,
+ TODO(sp, "Matching on tuple-like struct?");
+ ),
+ (Named,
+ TODO(sp, "Matching on struct?");
+ )
+ )
+ ),
+ (Union,
+ TODO(sp, "Match over Union");
+ ),
+ (Enum,
)
)
- ),
- (Union,
- TODO(sp, "Decision node on Union");
- ),
- (Enum,
- ASSERT_BUG(sp, node.m_branches.is_Variant(), "Tree for enum isn't a Variant - node="<<node);
- assert(pbe);
- this->generate_branches_Enum(sp, node.m_default, node.m_branches.as_Variant(), node.m_field_path, ty, mv$(val), mv$(and_then));
- )
- )
+ }
+
+ this->gen_dispatch__enum(mv$(ty), mv$(val), rules, ofs, arm_targets, def_blk);
),
(Generic,
- and_then(node);
+ BUG(sp, "Attempting to match a generic");
),
(TraitObject,
- ERROR(sp, E0000, "Attempting to match over a trait object");
+ BUG(sp, "Attempting to match a trait object");
),
(ErasedType,
- ERROR(sp, E0000, "Attempting to match over an erased type");
+ BUG(sp, "Attempting to match an erased type");
),
(Array,
- // TODO: Slice patterns, sequential comparison/sub-match
- TODO(sp, "Match over array");
+ BUG(sp, "Attempting to match on an Array (should have been destructured)");
),
(Slice,
- ASSERT_BUG(sp, node.m_branches.is_Slice(), "Tree for [T] isn't a _Slice - node="<<node);
- this->generate_branches_Slice(sp, node.m_default, node.m_branches.as_Slice(), node.m_field_path, ty, mv$(val), mv$(and_then));
+ // TODO: Slice size matches!
+ this->gen_dispatch__slice(mv$(ty), mv$(val), rules, ofs, arm_targets, def_blk);
+ ),
+ (Tuple,
+ BUG(sp, "Match directly on tuple");
),
(Borrow,
- if( *e.inner == ::HIR::CoreType::Str ) {
- TODO(sp, "Match over &str");
- }
- else {
- BUG(sp, "Decision node on non-str/[T] borrow - " << ty);
- }
+ BUG(sp, "Match directly on borrow");
),
(Pointer,
- ERROR(sp, E0000, "Attempting to match over a pointer");
+ // TODO: Could this actually be valid?
+ BUG(sp, "Attempting to match a pointer - " << ty);
),
(Function,
- ERROR(sp, E0000, "Attempting to match over a functon pointer");
+ // TODO: Could this actually be valid?
+ BUG(sp, "Attempting to match a function pointer - " << ty);
),
(Closure,
- ERROR(sp, E0000, "Attempting to match over a closure");
+ BUG(sp, "Attempting to match a closure");
)
)
}
-void DecisionTreeGen::generate_branch(const DecisionTreeNode::Branch& branch, ::std::function<void(const DecisionTreeNode&)> cb)
-{
- assert( !branch.is_Unset() );
- if( branch.is_Terminal() ) {
- this->m_builder.end_block( ::MIR::Terminator::make_Goto( this->get_block_for_rule( branch.as_Terminal() ) ) );
- }
- else {
- assert( branch.is_Subtree() );
- const auto& subnode = *branch.as_Subtree();
-
- cb(subnode);
- }
-}
-
-::MIR::LValue DecisionTreeGen::push_compare(const Span& sp, ::MIR::LValue left, ::MIR::eBinOp op, ::MIR::Param right)
-{
- return m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool,
- ::MIR::RValue::make_BinOp({ mv$(left), op, mv$(right) })
- );
-}
-
-// TODO: Unify logic for these two, and support simpler checks for sequential values
-void DecisionTreeGen::generate_branches_Signed(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Signed& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
-{
- auto ity = ty.m_data.as_Primitive();
- auto default_block = m_builder.new_bb_unlinked();
-
- // TODO: Convert into an integer switch w/ offset instead of chained comparisons
-
- for( const auto& branch : branches )
- {
- auto next_block = (&branch == &branches.back() ? default_block : m_builder.new_bb_unlinked());
-
- auto cmp_gt_block = m_builder.new_bb_unlinked();
- auto val_cmp_lt = push_compare(sp, val.clone(), ::MIR::eBinOp::LT, ::MIR::Constant::make_Int({ branch.first.start, ity }));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_lt), default_block, cmp_gt_block }) );
- m_builder.set_cur_block( cmp_gt_block );
-
- auto success_block = m_builder.new_bb_unlinked();
- auto val_cmp_gt = push_compare(sp, val.clone(), ::MIR::eBinOp::GT, ::MIR::Constant::make_Int({ branch.first.end, ity }));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
-
- m_builder.set_cur_block( success_block );
- this->generate_branch(branch.second, and_then);
-
- m_builder.set_cur_block( next_block );
- }
- assert( m_builder.block_active() );
-
- if( default_branch.is_Unset() ) {
- // TODO: Emit error if non-exhaustive
- m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
- }
- else {
- this->generate_branch(default_branch, and_then);
- }
-}
-
-void DecisionTreeGen::generate_branches_Unsigned(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Unsigned& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
+void MatchGenGrouped::gen_dispatch__primitive(::HIR::TypeRef ty, ::MIR::LValue val, const ::std::vector<t_rules_subset>& rules, size_t ofs, const ::std::vector<::MIR::BasicBlockId>& arm_targets, ::MIR::BasicBlockId def_blk)
{
- auto ity = ty.m_data.as_Primitive();
- auto default_block = m_builder.new_bb_unlinked();
-
- // TODO: Convert into an integer switch w/ offset instead of chained comparisons
-
- for( const auto& branch : branches )
+ auto te = ty.m_data.as_Primitive();
+ switch(te)
{
- auto next_block = (&branch == &branches.back() ? default_block : m_builder.new_bb_unlinked());
-
- auto cmp_gt_block = m_builder.new_bb_unlinked();
- auto val_cmp_lt = push_compare(sp, val.clone(), ::MIR::eBinOp::LT, ::MIR::Constant::make_Uint({ branch.first.start, ity }));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_lt), default_block, cmp_gt_block }) );
- m_builder.set_cur_block( cmp_gt_block );
+ case ::HIR::CoreType::Bool: {
+ ASSERT_BUG(sp, rules.size() <= 2, "More than 2 rules for boolean");
+ for(size_t i = 0; i < rules.size(); i++)
+ {
+ ASSERT_BUG(sp, rules[i][0][ofs].is_Bool(), "PatternRule for bool isn't _Bool");
+ }
+
+ // False sorts before true.
+ auto fail_bb = rules.size() == 2 ? arm_targets[ 0] : (rules[0][0][ofs].as_Bool() ? def_blk : arm_targets[0]);
+ auto succ_bb = rules.size() == 2 ? arm_targets[rules[0].size()] : (rules[0][0][ofs].as_Bool() ? arm_targets[0] : def_blk);
+
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val), succ_bb, fail_bb }) );
+ } break;
+ case ::HIR::CoreType::U8:
+ case ::HIR::CoreType::U16:
+ case ::HIR::CoreType::U32:
+ case ::HIR::CoreType::U64:
+ case ::HIR::CoreType::U128:
+ case ::HIR::CoreType::Usize:
+
+ case ::HIR::CoreType::I8:
+ case ::HIR::CoreType::I16:
+ case ::HIR::CoreType::I32:
+ case ::HIR::CoreType::I64:
+ case ::HIR::CoreType::I128:
+ case ::HIR::CoreType::Isize:
+
+ case ::HIR::CoreType::Char:
+ if( rules.size() == 1 )
+ {
+ // Special case, single option, equality only
+ const auto& r = rules[0][0][ofs];
+ ASSERT_BUG(sp, r.is_Value(), "Matching without _Value pattern - " << r.tag_str());
+ const auto& re = r.as_Value();
+ auto test_val = ::MIR::Param(re.clone());
+ auto cmp_lval = m_builder.get_rval_in_if_cond(sp, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, mv$(test_val) }));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), arm_targets[0], def_blk }) );
+ }
+ else
+ {
+ // TODO: Add a SwitchInt terminator for use with this. (Or just a SwitchVal terminator?)
- auto success_block = m_builder.new_bb_unlinked();
- auto val_cmp_gt = push_compare(sp, val.clone(), ::MIR::eBinOp::GT, ::MIR::Constant::make_Uint({ branch.first.end, ity }));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
+ // NOTE: Rules are currently sorted
+ // TODO: If there are Constant::Const values in the list, they need to come first! (with equality checks)
- m_builder.set_cur_block( success_block );
- this->generate_branch(branch.second, and_then);
+ // Does a sorted linear search. Binary search would be nicer but is harder to implement.
+ size_t tgt_ofs = 0;
+ for(size_t i = 0; i < rules.size(); i++)
+ {
+ for(size_t j = 1; j < rules[i].size(); j ++)
+ ASSERT_BUG(sp, arm_targets[tgt_ofs] == arm_targets[tgt_ofs+j], "Mismatched target blocks for Value match");
+
+ const auto& r = rules[i][0][ofs];
+ ASSERT_BUG(sp, r.is_Value(), "Matching without _Value pattern - " << r.tag_str());
+ const auto& re = r.as_Value();
+ if(re.is_Const())
+ TODO(sp, "Handle Constant::Const in match");
+
+ // IF v < tst : def_blk
+ // Skip if the previous value was the imediat predecesor
+ bool is_succ = i != 0 && (re.is_Uint()
+ ? re.as_Uint().v == rules[i-1][0][ofs].as_Value().as_Uint().v + 1
+ : re.as_Int().v == rules[i-1][0][ofs].as_Value().as_Int().v + 1
+ );
+ if( !is_succ )
+ {
+ auto cmp_eq_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_lt = this->push_compare(val.clone(), ::MIR::eBinOp::LT, ::MIR::Param(re.clone()));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_lt), def_blk, cmp_eq_blk }) );
+ m_builder.set_cur_block(cmp_eq_blk);
+ }
- m_builder.set_cur_block( next_block );
- }
- assert( m_builder.block_active() );
+ // IF v == tst : target
+ {
+ auto next_cmp_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_eq = this->push_compare( val.clone(), ::MIR::eBinOp::EQ, ::MIR::Param(re.clone()) );
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_eq), arm_targets[tgt_ofs], next_cmp_blk }) );
+ m_builder.set_cur_block(next_cmp_blk);
+ }
- if( default_branch.is_Unset() ) {
- // TODO: Emit error if non-exhaustive
- m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
- }
- else {
- this->generate_branch(default_branch, and_then);
- }
-}
+ tgt_ofs += rules[i].size();
+ }
+ m_builder.end_block( ::MIR::Terminator::make_Goto(def_blk) );
+ }
+ break;
+ case ::HIR::CoreType::F32:
+ case ::HIR::CoreType::F64: {
+ // NOTE: Rules are currently sorted
+ // TODO: If there are Constant::Const values in the list, they need to come first!
+ size_t tgt_ofs = 0;
+ for(size_t i = 0; i < rules.size(); i++)
+ {
+ for(size_t j = 1; j < rules[i].size(); j ++)
+ ASSERT_BUG(sp, arm_targets[tgt_ofs] == arm_targets[tgt_ofs+j], "Mismatched target blocks for Value match");
-void DecisionTreeGen::generate_branches_Float(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Float& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
-{
- auto ity = ty.m_data.as_Primitive();
- auto default_block = m_builder.new_bb_unlinked();
+ const auto& r = rules[i][0][ofs];
+ ASSERT_BUG(sp, r.is_Value(), "Matching without _Value pattern - " << r.tag_str());
+ const auto& re = r.as_Value();
+ if(re.is_Const())
+ TODO(sp, "Handle Constant::Const in match");
- for( const auto& branch : branches )
- {
- auto next_block = (&branch == &branches.back() ? default_block : m_builder.new_bb_unlinked());
+ // IF v < tst : def_blk
+ {
+ auto cmp_eq_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_lt = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::LT, ::MIR::Param(re.clone()) }));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_lt), def_blk, cmp_eq_blk }) );
+ m_builder.set_cur_block(cmp_eq_blk);
+ }
- auto cmp_gt_block = m_builder.new_bb_unlinked();
- auto val_cmp_lt = push_compare(sp, val.clone(), ::MIR::eBinOp::LT, ::MIR::Constant::make_Float({ branch.first.start, ity }));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_lt), default_block, cmp_gt_block }) );
- m_builder.set_cur_block( cmp_gt_block );
+ // IF v == tst : target
+ {
+ auto next_cmp_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_eq = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, ::MIR::Param(re.clone()) }));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_eq), arm_targets[tgt_ofs], next_cmp_blk }) );
+ m_builder.set_cur_block(next_cmp_blk);
+ }
+
+ tgt_ofs += rules[i].size();
+ }
+ m_builder.end_block( ::MIR::Terminator::make_Goto(def_blk) );
+ } break;
+ case ::HIR::CoreType::Str:
+ // Remove the deref on the &str
+ auto oval = mv$(val);
+ auto val = mv$(*oval.as_Deref().val);
+ // NOTE: Rules are currently sorted
+ // TODO: If there are Constant::Const values in the list, they need to come first!
+ size_t tgt_ofs = 0;
+ for(size_t i = 0; i < rules.size(); i++)
+ {
+ for(size_t j = 1; j < rules[i].size(); j ++)
+ ASSERT_BUG(sp, arm_targets[tgt_ofs] == arm_targets[tgt_ofs+j], "Mismatched target blocks for Value match");
- auto success_block = m_builder.new_bb_unlinked();
- auto val_cmp_gt = push_compare(sp, val.clone(), ::MIR::eBinOp::GT, ::MIR::Constant::make_Float({ branch.first.end, ity }));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
+ const auto& r = rules[i][0][ofs];
+ ASSERT_BUG(sp, r.is_Value(), "Matching without _Value pattern - " << r.tag_str());
+ const auto& re = r.as_Value();
+ if(re.is_Const())
+ TODO(sp, "Handle Constant::Const in match");
- m_builder.set_cur_block( success_block );
- this->generate_branch(branch.second, and_then);
+ // IF v < tst : def_blk
+ {
+ auto cmp_eq_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_lt = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::LT, ::MIR::Param(re.clone()) }));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_lt), def_blk, cmp_eq_blk }) );
+ m_builder.set_cur_block(cmp_eq_blk);
+ }
- m_builder.set_cur_block( next_block );
- }
- assert( m_builder.block_active() );
+ // IF v == tst : target
+ {
+ auto next_cmp_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_eq = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, ::MIR::Param(re.clone()) }));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_eq), arm_targets[tgt_ofs], next_cmp_blk }) );
+ m_builder.set_cur_block(next_cmp_blk);
+ }
- if( default_branch.is_Unset() ) {
- ERROR(sp, E0000, "Match over floating point with no `_` arm");
- }
- else {
- this->generate_branch(default_branch, and_then);
+ tgt_ofs += rules[i].size();
+ }
+ m_builder.end_block( ::MIR::Terminator::make_Goto(def_blk) );
+ break;
}
}
-void DecisionTreeGen::generate_branches_Char(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Unsigned& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
+void MatchGenGrouped::gen_dispatch__enum(::HIR::TypeRef ty, ::MIR::LValue val, const ::std::vector<t_rules_subset>& rules, size_t ofs, const ::std::vector<::MIR::BasicBlockId>& arm_targets, ::MIR::BasicBlockId def_blk)
{
- auto default_block = m_builder.new_bb_unlinked();
+ TRACE_FUNCTION;
+ auto& te = ty.m_data.as_Path();
+ const auto& pbe = te.binding.as_Enum();
- // TODO: Convert into an integer switch w/ offset instead of chained comparisons
+ auto decison_arm = m_builder.pause_cur_block();
- for( const auto& branch : branches )
+ auto var_count = pbe->m_variants.size();
+ ::std::vector< ::MIR::BasicBlockId> arms(var_count, def_blk);
+ size_t arm_idx = 0;
+ for(size_t i = 0; i < rules.size(); i ++)
{
- auto next_block = (&branch == &branches.back() ? default_block : m_builder.new_bb_unlinked());
-
- auto cmp_gt_block = m_builder.new_bb_unlinked();
- auto val_cmp_lt = push_compare(sp, val.clone(), ::MIR::eBinOp::LT, ::MIR::Constant::make_Uint({ branch.first.start, ::HIR::CoreType::Char }));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_lt), default_block, cmp_gt_block }) );
- m_builder.set_cur_block( cmp_gt_block );
-
- auto success_block = m_builder.new_bb_unlinked();
- auto val_cmp_gt = push_compare(sp, val.clone(), ::MIR::eBinOp::GT, ::MIR::Constant::make_Uint({ branch.first.end, ::HIR::CoreType::Char }));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
+ ASSERT_BUG(sp, rules[i][0][ofs].is_Variant(), "Rule for enum isn't Any or Variant - " << rules[i][0][ofs].tag_str());
+ const auto& re = rules[i][0][ofs].as_Variant();
+ unsigned int var_idx = re.idx;
+ DEBUG("Variant " << var_idx);
- m_builder.set_cur_block( success_block );
- this->generate_branch(branch.second, and_then);
+ ASSERT_BUG(sp, re.sub_rules.size() == 0, "Sub-rules in MatchGenGrouped");
- m_builder.set_cur_block( next_block );
- }
- assert( m_builder.block_active() );
-
- if( default_branch.is_Unset() ) {
- // TODO: Error if not exhaustive.
- m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
- }
- else {
- this->generate_branch(default_branch, and_then);
- }
-}
-void DecisionTreeGen::generate_branches_Bool(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Bool& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
-{
- //assert( ty.m_data.is_Boolean() );
-
- if( default_branch.is_Unset() )
- {
- if( branches.false_branch.is_Unset() || branches.true_branch.is_Unset() ) {
- // Non-exhaustive match - ERROR
- }
- }
- else
- {
- if( branches.false_branch.is_Unset() && branches.true_branch.is_Unset() ) {
- // Unreachable default (NOTE: Not an error here)
+ arms[var_idx] = arm_targets[arm_idx];
+ for(size_t j = 0; j < rules[i].size(); j ++)
+ {
+ assert(arms[var_idx] == arm_targets[arm_idx]);
+ arm_idx ++;
}
}
- // Emit an if based on the route taken
- auto bb_false = m_builder.new_bb_unlinked();
- auto bb_true = m_builder.new_bb_unlinked();
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val), bb_true, bb_false }) );
-
- // Recurse into sub-patterns
- const auto& branch_false = ( !branches.false_branch.is_Unset() ? branches.false_branch : default_branch );
- const auto& branch_true = ( !branches. true_branch.is_Unset() ? branches. true_branch : default_branch );
-
- m_builder.set_cur_block(bb_true );
- this->generate_branch(branch_true , and_then);
- m_builder.set_cur_block(bb_false);
- this->generate_branch(branch_false, and_then);
-}
-
-void DecisionTreeGen::generate_branches_Borrow_str(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_String& branches,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
-{
- // TODO: Chained comparisons with ordering.
- // - Would this just emit a eBinOp? That implies deep codegen support for strings.
- // - rustc emits calls to PartialEq::eq for this and for slices. mrustc could use PartialOrd and fall back to PartialEq if unavaliable?
- // > Requires crate access here! - A memcmp call is probably better, probably via a binop
- // NOTE: The below implementation gets the final codegen to call memcmp on the strings by emitting eBinOp::{LT,GT}
-
- // - Remove the wrapping Deref (which must be there)
- ASSERT_BUG(sp, val.is_Deref(), "Match over str without a deref - " << val);
- auto tmp = mv$( *val.as_Deref().val );
- val = mv$(tmp);
-
- auto default_bb = m_builder.new_bb_unlinked();
-
- // TODO: Binary search? Perfect-Hash-Function?
- assert( !branches.empty() );
- for(const auto& branch : branches)
- {
- auto next_bb = (&branch == &branches.back() ? default_bb : m_builder.new_bb_unlinked());
-
- auto cmp_gt_bb = m_builder.new_bb_unlinked();
-
- auto lt_val = push_compare(sp, val.clone(), ::MIR::eBinOp::LT, ::MIR::Constant(branch.first) );
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(lt_val), default_bb, cmp_gt_bb }) );
- m_builder.set_cur_block(cmp_gt_bb);
-
- auto eq_bb = m_builder.new_bb_unlinked();
- auto gt_val = push_compare(sp, val.clone(), ::MIR::eBinOp::GT, ::MIR::Constant(branch.first) );
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(gt_val), next_bb, eq_bb }) );
- m_builder.set_cur_block(eq_bb);
-
- this->generate_branch(branch.second, and_then);
-
- m_builder.set_cur_block(next_bb);
- }
- this->generate_branch(default_branch, and_then);
+ m_builder.set_cur_block(decison_arm);
+ m_builder.end_block( ::MIR::Terminator::make_Switch({ mv$(val), mv$(arms) }) );
}
-void DecisionTreeGen::generate_branches_Enum(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Variant& branches,
- const field_path_t& field_path,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
+void MatchGenGrouped::gen_dispatch__slice(::HIR::TypeRef ty, ::MIR::LValue val, const ::std::vector<t_rules_subset>& rules, size_t ofs, const ::std::vector<::MIR::BasicBlockId>& arm_targets, ::MIR::BasicBlockId def_blk)
{
- const auto& enum_ref = *ty.m_data.as_Path().binding.as_Enum();
- const auto& enum_path = ty.m_data.as_Path().path.m_data.as_Generic();
- const auto& variants = enum_ref.m_variants;
- auto variant_count = variants.size();
- bool has_any = ! default_branch.is_Unset();
-
- if( branches.size() < variant_count && ! has_any ) {
- ERROR(sp, E0000, "Non-exhaustive match over " << ty << " - " << branches.size() << " out of " << variant_count << " present");
- }
- // DISABLED: Some complex matches don't directly use some defaults
- //if( branches.size() == variant_count && has_any ) {
- // ERROR(sp, E0000, "Unreachable _ arm - " << branches.size() << " variants in " << enum_path);
- //}
+ auto val_len = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_DstMeta({ m_builder.get_ptr_to_dst(sp, val).clone() }));
- auto any_block = (has_any ? m_builder.new_bb_unlinked() : 0);
+ // TODO: Re-sort the rules list to interleve Constant::Bytes and Slice
- // Emit a switch over the variant
- ::std::vector< ::MIR::BasicBlockId> variant_blocks;
- variant_blocks.reserve( variant_count );
- for( const auto& branch : branches )
- {
- if( variant_blocks.size() != branch.first ) {
- assert( variant_blocks.size() < branch.first );
- assert( has_any );
- variant_blocks.resize( branch.first, any_block );
- }
- variant_blocks.push_back( m_builder.new_bb_unlinked() );
- }
- if( variant_blocks.size() != variant_count )
+ // Just needs to check the lengths, then dispatch.
+ size_t tgt_ofs = 0;
+ for(size_t i = 0; i < rules.size(); i++)
{
- ASSERT_BUG(sp, variant_blocks.size() < variant_count, "Branch count (" << variant_blocks.size() << ") > variant count (" << variant_count << ") in match of " << ty);
- ASSERT_BUG(sp, has_any, "Non-exhaustive match and no any arm");
- variant_blocks.resize( variant_count, any_block );
- }
- bool any_arm_used = ::std::any_of( variant_blocks.begin(), variant_blocks.end(), [any_block](const auto& blk){ return blk == any_block; } );
+ const auto& r = rules[i][0][ofs];
+ if(const auto* re = r.opt_Slice())
+ {
+ ASSERT_BUG(sp, re->sub_rules.size() == 0, "Sub-rules in MatchGenGrouped");
+ auto val_tst = ::MIR::Constant::make_Uint({ re->len, ::HIR::CoreType::Usize });
- m_builder.end_block( ::MIR::Terminator::make_Switch({
- val.clone(), variant_blocks // NOTE: Copies the list, so it can be used lower down
- }) );
+ for(size_t j = 0; j < rules[i].size(); j ++)
+ assert(arm_targets[tgt_ofs] == arm_targets[tgt_ofs+j]);
- // Emit sub-patterns, looping over variants
- for( const auto& branch : branches )
- {
- auto bb = variant_blocks[branch.first];
- const auto& var = variants[branch.first];
- DEBUG(branch.first << " " << var.first << " = " << branch.second);
+ // IF v < tst : target
+ if( re->len > 0 )
+ {
+ auto cmp_eq_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_lt = this->push_compare( val_len.clone(), ::MIR::eBinOp::LT, val_tst.clone() );
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_lt), def_blk, cmp_eq_blk }) );
+ m_builder.set_cur_block(cmp_eq_blk);
+ }
- auto var_lval = ::MIR::LValue::make_Downcast({ box$(val.clone()), branch.first });
+ // IF v == tst : target
+ {
+ auto next_cmp_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_eq = this->push_compare( val_len.clone(), ::MIR::eBinOp::EQ, mv$(val_tst) );
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_eq), arm_targets[tgt_ofs], next_cmp_blk }) );
+ m_builder.set_cur_block(next_cmp_blk);
+ }
+ }
+ else if(const auto* re = r.opt_Value())
+ {
+ ASSERT_BUG(sp, re->is_Bytes(), "Slice with non-Bytes value - " << *re);
+ const auto& b = re->as_Bytes();
- ::HIR::TypeRef fake_ty;
+ auto val_tst = ::MIR::Constant::make_Uint({ b.size(), ::HIR::CoreType::Usize });
+ auto cmp_slice_val = m_builder.lvalue_or_temp(sp,
+ ::HIR::TypeRef::new_borrow( ::HIR::BorrowType::Shared, ::HIR::TypeRef::new_slice(::HIR::CoreType::U8) ),
+ ::MIR::RValue::make_MakeDst({ ::MIR::Param(re->clone()), val_tst.clone() })
+ );
- TU_MATCHA( (var.second), (e),
- (Unit,
- DEBUG("- Unit");
- ),
- (Value,
- DEBUG("- Value");
- ),
- (Tuple,
- // Make a fake tuple
- ::std::vector< ::HIR::TypeRef> ents;
- for( const auto& fld : e )
+ if( b.size() > 0 )
{
- ents.push_back( monomorphise_type(sp, enum_ref.m_params, enum_path.m_params, fld.ent) );
+ auto cmp_eq_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_lt = this->push_compare( val_len.clone(), ::MIR::eBinOp::LT, val_tst.clone() );
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_lt), def_blk, cmp_eq_blk }) );
+ m_builder.set_cur_block(cmp_eq_blk);
}
- fake_ty = ::HIR::TypeRef( mv$(ents) );
- m_builder.resolve().expand_associated_types(sp, fake_ty);
- DEBUG("- Tuple - " << fake_ty);
- ),
- (Struct,
- ::std::vector< ::HIR::TypeRef> ents;
- for( const auto& fld : e )
+
+ // IF v == tst : target
{
- ents.push_back( monomorphise_type(sp, enum_ref.m_params, enum_path.m_params, fld.second.ent) );
- }
- fake_ty = ::HIR::TypeRef( mv$(ents) );
- m_builder.resolve().expand_associated_types(sp, fake_ty);
- DEBUG("- Struct - " << fake_ty);
- )
- )
+ auto succ_blk = m_builder.new_bb_unlinked();
+ auto next_cmp_blk = m_builder.new_bb_unlinked();
+ auto cmp_lval_eq = this->push_compare( val_len.clone(), ::MIR::eBinOp::EQ, mv$(val_tst) );
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_eq), succ_blk, next_cmp_blk }) );
+ m_builder.set_cur_block(succ_blk);
- m_builder.set_cur_block( bb );
- if( fake_ty == ::HIR::TypeRef() || fake_ty.m_data.as_Tuple().size() == 0 ) {
- this->generate_branch(branch.second, and_then);
- }
- else {
- this->generate_branch(branch.second, [&](auto& subnode) {
- // Call special handler to determine when the enum is over
- this->generate_tree_code__enum(sp, subnode, fake_ty, var_lval, field_path, and_then);
- });
- }
- }
+ // TODO: What if `val` isn't a Deref?
+ ASSERT_BUG(sp, val.is_Deref(), "TODO: Handle non-Deref matches of byte strings");
+ cmp_lval_eq = this->push_compare( val.as_Deref().val->clone(), ::MIR::eBinOp::EQ, mv$(cmp_slice_val) );
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval_eq), arm_targets[tgt_ofs], def_blk }) );
- if( any_arm_used )
- {
- DEBUG("_ = " << default_branch);
- if( !default_branch.is_Unset() )
+ m_builder.set_cur_block(next_cmp_blk);
+ }
+ }
+ else
{
- m_builder.set_cur_block(any_block);
- this->generate_branch(default_branch, and_then);
+ BUG(sp, "Matching without _Slice pattern - " << r.tag_str() << " - " << r);
}
- }
- else
- {
- DEBUG("_ = UNUSED - " << default_branch);
- }
-}
-void DecisionTreeGen::generate_branches_Slice(
- const Span& sp,
- const DecisionTreeNode::Branch& default_branch,
- const DecisionTreeNode::Values::Data_Slice& branches,
- const field_path_t& field_path,
- const ::HIR::TypeRef& ty, ::MIR::LValue val,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
-{
- if( default_branch.is_Unset() ) {
- ERROR(sp, E0000, "Non-exhaustive match over " << ty);
+ tgt_ofs += rules[i].size();
}
+ m_builder.end_block( ::MIR::Terminator::make_Goto(def_blk) );
+}
- auto val_len = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_DstMeta({ m_builder.get_ptr_to_dst(sp, val).clone() }));
-
- // NOTE: Un-deref the slice
- ASSERT_BUG(sp, val.is_Deref(), "slice matches must be passed a deref");
- auto tmp = mv$( *val.as_Deref().val );
- val = mv$(tmp);
-
- auto any_block = m_builder.new_bb_unlinked();
- // TODO: Select one of three ways of picking the arm:
- // - Integer switch (unimplemented)
- // - Binary search
- // - Sequential comparisons
+void MatchGenGrouped::gen_dispatch_range(const field_path_t& field_path, const ::MIR::Constant& first, const ::MIR::Constant& last, ::MIR::BasicBlockId def_blk)
+{
+ TRACE_FUNCTION_F("field_path="<<field_path<<", " << first << " ... " << last);
+ ::MIR::LValue val;
+ ::HIR::TypeRef ty;
+ get_ty_and_val(sp, m_builder.resolve(), m_top_ty, m_top_val, field_path, m_field_path_ofs, ty, val);
+ DEBUG("ty = " << ty << ", val = " << val);
- // TODO: Binary search instead?
- for( const auto& branch : branches.fixed_arms )
+ if( const auto* tep = ty.m_data.opt_Primitive() )
{
- auto val_des = ::MIR::Constant::make_Uint({ static_cast<uint64_t>(branch.first), ::HIR::CoreType::Usize });
-
- // Special case - final just does equality
- if( &branch == &branches.fixed_arms.back() )
- {
- auto val_cmp_eq = push_compare(sp, val_len.clone(), ::MIR::eBinOp::EQ, mv$(val_des));
-
- auto success_block = m_builder.new_bb_unlinked();
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_eq), success_block, any_block }) );
+ auto te = *tep;
- m_builder.set_cur_block( success_block );
- this->generate_branch(branch.second, and_then);
+ bool lower_possible = true;
+ bool upper_possible = true;
- m_builder.set_cur_block( any_block );
- }
- // Special case for zero (which can't have a LT)
- else if( branch.first == 0 )
+ switch(te)
{
- auto next_block = m_builder.new_bb_unlinked();
- auto val_cmp_eq = push_compare(sp, val_len.clone(), ::MIR::eBinOp::EQ, mv$(val_des));
-
- auto success_block = m_builder.new_bb_unlinked();
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_eq), success_block, next_block }) );
+ case ::HIR::CoreType::Bool:
+ BUG(sp, "Range match over Bool");
+ break;
+ case ::HIR::CoreType::Str:
+ BUG(sp, "Range match over Str - is this valid?");
+ break;
+ case ::HIR::CoreType::U8:
+ case ::HIR::CoreType::U16:
+ case ::HIR::CoreType::U32:
+ case ::HIR::CoreType::U64:
+ case ::HIR::CoreType::U128:
+ case ::HIR::CoreType::Usize:
+ lower_possible = (first.as_Uint().v > 0);
+ // TODO: Should this also check for the end being the max value of the type?
+ // - Can just leave that to the optimiser
+ upper_possible = true;
+ break;
+ case ::HIR::CoreType::I8:
+ case ::HIR::CoreType::I16:
+ case ::HIR::CoreType::I32:
+ case ::HIR::CoreType::I64:
+ case ::HIR::CoreType::I128:
+ case ::HIR::CoreType::Isize:
+ lower_possible = true;
+ upper_possible = true;
+ break;
+ case ::HIR::CoreType::Char:
+ lower_possible = (first.as_Uint().v > 0);
+ upper_possible = (first.as_Uint().v <= 0x10FFFF);
+ break;
+ case ::HIR::CoreType::F32:
+ case ::HIR::CoreType::F64:
+ // NOTE: No upper or lower limits
+ break;
+ }
- m_builder.set_cur_block( success_block );
- this->generate_branch(branch.second, and_then);
+ if( lower_possible )
+ {
+ auto test_bb_2 = m_builder.new_bb_unlinked();
+ // IF `val` < `first` : fail_bb
+ auto cmp_lt_lval = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ ::MIR::Param(val.clone()), ::MIR::eBinOp::LT, ::MIR::Param(first.clone()) }));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lt_lval), def_blk, test_bb_2 }) );
- m_builder.set_cur_block( next_block );
+ m_builder.set_cur_block(test_bb_2);
}
- // General case, with two comparisons
- else
- {
- auto next_block = m_builder.new_bb_unlinked();
- auto cmp_gt_block = m_builder.new_bb_unlinked();
- auto val_cmp_lt = push_compare(sp, val_len.clone(), ::MIR::eBinOp::LT, val_des.clone());
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_lt), any_block, cmp_gt_block }) );
- m_builder.set_cur_block( cmp_gt_block );
- auto success_block = m_builder.new_bb_unlinked();
- auto val_cmp_gt = push_compare(sp, val_len.clone(), ::MIR::eBinOp::GT, mv$(val_des));
- m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
+ if( upper_possible )
+ {
+ auto succ_bb = m_builder.new_bb_unlinked();
- m_builder.set_cur_block( success_block );
- this->generate_branch(branch.second, and_then);
+ // IF `val` > `last` : fail_bb
+ auto cmp_gt_lval = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ ::MIR::Param(val.clone()), ::MIR::eBinOp::GT, ::MIR::Param(last.clone()) }));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_gt_lval), def_blk, succ_bb }) );
- m_builder.set_cur_block( next_block );
+ m_builder.set_cur_block(succ_bb);
}
}
- assert( m_builder.block_active() );
-
- if( default_branch.is_Unset() ) {
- // TODO: Emit error if non-exhaustive
- m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
- }
- else {
- this->generate_branch(default_branch, and_then);
+ else
+ {
+ TODO(sp, "ValueRange on " << ty);
}
}
+void MatchGenGrouped::gen_dispatch_splitslice(const field_path_t& field_path, const PatternRule::Data_SplitSlice& e, ::MIR::BasicBlockId def_blk)
+{
+ TRACE_FUNCTION_F("field_path="<<field_path<<", [" << e.leading << ", .., " << e.trailing << "]");
+ ::MIR::LValue val;
+ ::HIR::TypeRef ty;
+ get_ty_and_val(sp, m_builder.resolve(), m_top_ty, m_top_val, field_path, m_field_path_ofs, ty, val);
+ DEBUG("ty = " << ty << ", val = " << val);
-namespace {
- bool path_starts_with(const field_path_t& test, const field_path_t& prefix)
+ ASSERT_BUG(sp, e.leading.size() == 0, "Sub-rules in MatchGenGrouped");
+ ASSERT_BUG(sp, e.trailing.size() == 0, "Sub-rules in MatchGenGrouped");
+ ASSERT_BUG(sp, ty.m_data.is_Slice(), "SplitSlice pattern on non-slice - " << ty);
+
+ // Obtain slice length
+ auto val_len = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_DstMeta({ m_builder.get_ptr_to_dst(sp, val).clone() }));
+
+ // 1. Check that length is sufficient for the pattern to be used
+ // `IF len < min_len : def_blk, next
{
- //DEBUG("test="<<test<<", prefix="<<prefix);
- if( test.size() < prefix.size() )
- {
- return false;
- }
- else if( ! ::std::equal(prefix.data.begin(), prefix.data.end(), test.data.begin()) )
- {
- return false;
- }
- else
- {
- return true;
- }
+ auto next = m_builder.new_bb_unlinked();
+ auto cmp_val = this->push_compare(val_len.clone(), ::MIR::eBinOp::LT, ::MIR::Constant::make_Uint({ e.min_len, ::HIR::CoreType::Usize }));
+ m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_val), def_blk, next }) );
+ m_builder.set_cur_block(next);
}
-}
-void DecisionTreeGen::generate_tree_code__enum(
- const Span& sp,
- const DecisionTreeNode& node, const ::HIR::TypeRef& fake_ty, const ::MIR::LValue& val,
- const field_path_t& path_prefix,
- ::std::function<void(const DecisionTreeNode&)> and_then
- )
-{
- if( ! path_starts_with(node.m_field_path, path_prefix) )
+ // 2. Recurse into leading patterns.
+ if( e.min_len > e.trailing_len )
{
- and_then(node);
+ auto next = m_builder.new_bb_unlinked();
+ auto inner_set = t_rules_subset { 1, /*is_arm_indexes=*/false };
+ inner_set.push_bb( e.leading, next );
+ auto inst = MatchGenGrouped { m_builder, sp, ty, val, {}, field_path.size() };
+ inst.gen_for_slice(inner_set, 0, def_blk);
+
+ m_builder.set_cur_block(next);
}
- else
+
+ if( e.trailing_len != 0 )
{
- this->generate_tree_code(sp, node, fake_ty, path_prefix.size(), val,
- [&](const auto& next_node) {
- if( ! path_starts_with(next_node.m_field_path, path_prefix) )
- {
- and_then(next_node);
- }
- else
- {
- this->generate_tree_code__enum(sp, next_node, fake_ty, val, path_prefix, and_then);
- }
- });
+ TODO(sp, "Handle trailing rules in SplitSlice - " << e.trailing);
}
}
diff --git a/src/mir/helpers.cpp b/src/mir/helpers.cpp
index 9c2107ab..9242ccb7 100644
--- a/src/mir/helpers.cpp
+++ b/src/mir/helpers.cpp
@@ -10,6 +10,7 @@
#include <hir/hir.hpp>
#include <hir/type.hpp>
#include <mir/mir.hpp>
+#include <algorithm> // ::std::find
void ::MIR::TypeResolve::fmt_pos(::std::ostream& os) const
{
@@ -33,6 +34,13 @@ void ::MIR::TypeResolve::print_msg(const char* tag, ::std::function<void(::std::
//throw CheckFailure {};
}
+unsigned int ::MIR::TypeResolve::get_cur_stmt_ofs() const
+{
+ if( this->stmt_idx == STMT_TERM )
+ return m_fcn.blocks.at(this->bb_idx).statements.size();
+ else
+ return this->stmt_idx;
+}
const ::MIR::BasicBlock& ::MIR::TypeResolve::get_block(::MIR::BasicBlockId id) const
{
MIR_ASSERT(*this, id < m_fcn.blocks.size(), "Block ID " << id << " out of range");
@@ -252,14 +260,17 @@ const ::HIR::TypeRef& ::MIR::TypeResolve::get_lvalue_type(::HIR::TypeRef& tmp, c
auto v = m_resolve.get_value(this->sp, e.p, p, /*signature_only=*/true);
if( const auto* ve = v.opt_Constant() ) {
const auto& ty = (*ve)->m_type;
- MIR_TODO(*this, "Monomorphise type " << ty);
+ if( monomorphise_type_needed(ty) )
+ MIR_TODO(*this, "get_const_type - Monomorphise type " << ty);
+ else
+ return ty.clone();
}
else {
- MIR_BUG(*this, "");
+ MIR_BUG(*this, "get_const_type - Not a constant");
}
),
(ItemAddr,
- MIR_TODO(*this, "Get type for constant `" << c << "`");
+ MIR_TODO(*this, "get_const_type - Get type for constant `" << c << "`");
)
)
throw "";
@@ -268,3 +279,1287 @@ const ::HIR::TypeRef* ::MIR::TypeResolve::is_type_owned_box(const ::HIR::TypeRef
{
return m_resolve.is_type_owned_box(ty);
}
+
+using namespace MIR::visit;
+
+// --------------------------------------------------------------------
+// MIR_Helper_GetLifetimes
+// --------------------------------------------------------------------
+namespace MIR {
+namespace visit {
+ bool visit_mir_lvalue(const ::MIR::LValue& lv, ValUsage u, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb)
+ {
+ if( cb(lv, u) )
+ return true;
+ TU_MATCHA( (lv), (e),
+ (Variable,
+ ),
+ (Argument,
+ ),
+ (Temporary,
+ ),
+ (Static,
+ ),
+ (Return,
+ ),
+ (Field,
+ return visit_mir_lvalue(*e.val, u, cb);
+ ),
+ (Deref,
+ return visit_mir_lvalue(*e.val, ValUsage::Read, cb);
+ ),
+ (Index,
+ bool rv = false;
+ rv |= visit_mir_lvalue(*e.val, u, cb);
+ rv |= visit_mir_lvalue(*e.idx, ValUsage::Read, cb);
+ return rv;
+ ),
+ (Downcast,
+ return visit_mir_lvalue(*e.val, u, cb);
+ )
+ )
+ return false;
+ }
+
+ bool visit_mir_lvalue(const ::MIR::Param& p, ValUsage u, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb)
+ {
+ if( const auto* e = p.opt_LValue() )
+ {
+ if(cb(*e, ValUsage::Move))
+ return true;
+ return visit_mir_lvalue(*e, u, cb);
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ bool visit_mir_lvalues(const ::MIR::RValue& rval, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb)
+ {
+ bool rv = false;
+ TU_MATCHA( (rval), (se),
+ (Use,
+ if(cb(se, ValUsage::Move))
+ return true;
+ rv |= visit_mir_lvalue(se, ValUsage::Read, cb);
+ ),
+ (Constant,
+ ),
+ (SizedArray,
+ rv |= visit_mir_lvalue(se.val, ValUsage::Read, cb);
+ ),
+ (Borrow,
+ rv |= visit_mir_lvalue(se.val, ValUsage::Borrow, cb);
+ ),
+ (Cast,
+ rv |= visit_mir_lvalue(se.val, ValUsage::Read, cb);
+ ),
+ (BinOp,
+ rv |= visit_mir_lvalue(se.val_l, ValUsage::Read, cb);
+ rv |= visit_mir_lvalue(se.val_r, ValUsage::Read, cb);
+ ),
+ (UniOp,
+ rv |= visit_mir_lvalue(se.val, ValUsage::Read, cb);
+ ),
+ (DstMeta,
+ rv |= visit_mir_lvalue(se.val, ValUsage::Read, cb);
+ ),
+ (DstPtr,
+ rv |= visit_mir_lvalue(se.val, ValUsage::Read, cb);
+ ),
+ (MakeDst,
+ rv |= visit_mir_lvalue(se.ptr_val, ValUsage::Read, cb);
+ rv |= visit_mir_lvalue(se.meta_val, ValUsage::Read, cb);
+ ),
+ (Tuple,
+ for(auto& v : se.vals)
+ rv |= visit_mir_lvalue(v, ValUsage::Read, cb);
+ ),
+ (Array,
+ for(auto& v : se.vals)
+ rv |= visit_mir_lvalue(v, ValUsage::Read, cb);
+ ),
+ (Variant,
+ rv |= visit_mir_lvalue(se.val, ValUsage::Read, cb);
+ ),
+ (Struct,
+ for(auto& v : se.vals)
+ rv |= visit_mir_lvalue(v, ValUsage::Read, cb);
+ )
+ )
+ return rv;
+ }
+
+ bool visit_mir_lvalues(const ::MIR::Statement& stmt, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb)
+ {
+ bool rv = false;
+ TU_MATCHA( (stmt), (e),
+ (Assign,
+ rv |= visit_mir_lvalues(e.src, cb);
+ rv |= visit_mir_lvalue(e.dst, ValUsage::Write, cb);
+ ),
+ (Asm,
+ for(auto& v : e.inputs)
+ rv |= visit_mir_lvalue(v.second, ValUsage::Read, cb);
+ for(auto& v : e.outputs)
+ rv |= visit_mir_lvalue(v.second, ValUsage::Write, cb);
+ ),
+ (SetDropFlag,
+ ),
+ (Drop,
+ rv |= visit_mir_lvalue(e.slot, ValUsage::Move, cb);
+ ),
+ (ScopeEnd,
+ )
+ )
+ return rv;
+ }
+
+ bool visit_mir_lvalues(const ::MIR::Terminator& term, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb)
+ {
+ bool rv = false;
+ TU_MATCHA( (term), (e),
+ (Incomplete,
+ ),
+ (Return,
+ ),
+ (Diverge,
+ ),
+ (Goto,
+ ),
+ (Panic,
+ ),
+ (If,
+ rv |= visit_mir_lvalue(e.cond, ValUsage::Read, cb);
+ ),
+ (Switch,
+ rv |= visit_mir_lvalue(e.val, ValUsage::Read, cb);
+ ),
+ (Call,
+ if( e.fcn.is_Value() ) {
+ rv |= visit_mir_lvalue(e.fcn.as_Value(), ValUsage::Read, cb);
+ }
+ for(auto& v : e.args)
+ rv |= visit_mir_lvalue(v, ValUsage::Read, cb);
+ rv |= visit_mir_lvalue(e.ret_val, ValUsage::Write, cb);
+ )
+ )
+ return rv;
+ }
+ /*
+ void visit_mir_lvalues_mut(::MIR::TypeResolve& state, ::MIR::Function& fcn, ::std::function<bool(::MIR::LValue& , ValUsage)> cb)
+ {
+ for(unsigned int block_idx = 0; block_idx < fcn.blocks.size(); block_idx ++)
+ {
+ auto& block = fcn.blocks[block_idx];
+ for(auto& stmt : block.statements)
+ {
+ state.set_cur_stmt(block_idx, (&stmt - &block.statements.front()));
+ visit_mir_lvalues_mut(stmt, cb);
+ }
+ if( block.terminator.tag() == ::MIR::Terminator::TAGDEAD )
+ continue ;
+ state.set_cur_stmt_term(block_idx);
+ visit_mir_lvalues_mut(block.terminator, cb);
+ }
+ }
+ void visit_mir_lvalues(::MIR::TypeResolve& state, const ::MIR::Function& fcn, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb)
+ {
+ visit_mir_lvalues_mut(state, const_cast<::MIR::Function&>(fcn), [&](auto& lv, auto im){ return cb(lv, im); });
+ }
+ */
+} // namespace visit
+} // namespace MIR
+namespace
+{
+ struct ValueLifetime
+ {
+ ::std::vector<bool> stmt_bitmap;
+ ValueLifetime(size_t stmt_count):
+ stmt_bitmap(stmt_count)
+ {}
+
+ void fill(const ::std::vector<size_t>& block_offsets, size_t bb, size_t first_stmt, size_t last_stmt)
+ {
+ size_t limit = block_offsets[bb+1] - block_offsets[bb] - 1;
+ DEBUG("bb" << bb << " : " << first_stmt << "--" << last_stmt);
+ assert(first_stmt <= limit);
+ assert(last_stmt <= limit);
+ for(size_t stmt = first_stmt; stmt <= last_stmt; stmt++)
+ {
+ stmt_bitmap[block_offsets[bb] + stmt] = true;
+ }
+ }
+
+ void dump_debug(const char* suffix, unsigned i, const ::std::vector<size_t>& block_offsets)
+ {
+ ::std::string name = FMT(suffix << "$" << i);
+ while(name.size() < 3+1+3)
+ name += " ";
+ DEBUG(name << " : " << FMT_CB(os,
+ for(unsigned int j = 0; j < this->stmt_bitmap.size(); j++)
+ {
+ if(j != 0 && ::std::find(block_offsets.begin(), block_offsets.end(), j) != block_offsets.end())
+ os << "|";
+ os << (this->stmt_bitmap[j] ? "X" : " ");
+ }
+ ));
+ }
+ };
+}
+#if 1
+void MIR_Helper_GetLifetimes_DetermineValueLifetime(::MIR::TypeResolve& state, const ::MIR::Function& fcn, size_t bb_idx, size_t stmt_idx, const ::MIR::LValue& lv, const ::std::vector<size_t>& block_offsets, ValueLifetime& vl);
+
+::MIR::ValueLifetimes MIR_Helper_GetLifetimes(::MIR::TypeResolve& state, const ::MIR::Function& fcn, bool dump_debug)
+{
+ TRACE_FUNCTION_F(state);
+
+ size_t statement_count = 0;
+ ::std::vector<size_t> block_offsets;
+ block_offsets.reserve( fcn.blocks.size() );
+ for(const auto& bb : fcn.blocks)
+ {
+ block_offsets.push_back(statement_count);
+ statement_count += bb.statements.size() + 1; // +1 for the terminator
+ }
+ block_offsets.push_back(statement_count); // Store the final limit for later code to use.
+
+ ::std::vector<ValueLifetime> temporary_lifetimes( fcn.temporaries.size(), ValueLifetime(statement_count) );
+ ::std::vector<ValueLifetime> variable_lifetimes( fcn.named_variables.size(), ValueLifetime(statement_count) );
+
+
+ // Enumerate direct assignments of variables (linear iteration of BB list)
+ for(size_t bb_idx = 0; bb_idx < fcn.blocks.size(); bb_idx ++)
+ {
+ auto assigned_lvalue = [&](size_t bb_idx, size_t stmt_idx, const ::MIR::LValue& lv) {
+ // NOTE: Fills the first statement after running, just to ensure that any assigned value has _a_ lifetime
+ if( const auto* de = lv.opt_Variable() )
+ {
+ MIR_Helper_GetLifetimes_DetermineValueLifetime(state, fcn, bb_idx, stmt_idx, lv, block_offsets, variable_lifetimes[*de]);
+ variable_lifetimes[*de].fill(block_offsets, bb_idx, stmt_idx, stmt_idx);
+ }
+ else if( const auto* de = lv.opt_Temporary() )
+ {
+ MIR_Helper_GetLifetimes_DetermineValueLifetime(state, fcn, bb_idx, stmt_idx, lv, block_offsets, temporary_lifetimes[de->idx]);
+ temporary_lifetimes[de->idx].fill(block_offsets, bb_idx, stmt_idx, stmt_idx);
+ }
+ else
+ {
+ // Not a direct assignment of a slot
+ }
+ };
+
+ const auto& bb = fcn.blocks[bb_idx];
+ for(size_t stmt_idx = 0; stmt_idx < bb.statements.size(); stmt_idx ++)
+ {
+ state.set_cur_stmt(bb_idx, stmt_idx);
+ const auto& stmt = bb.statements[stmt_idx];
+ if( const auto* se = stmt.opt_Assign() )
+ {
+ // For assigned variables, determine how long that value will live
+ assigned_lvalue(bb_idx, stmt_idx+1, se->dst);
+ }
+ else if( const auto* se = stmt.opt_Asm() )
+ {
+ for(const auto& e : se->outputs)
+ {
+ assigned_lvalue(bb_idx, stmt_idx+1, e.second);
+ }
+ }
+ }
+ state.set_cur_stmt_term(bb_idx);
+
+ // Only Call can assign a value
+ TU_IFLET(::MIR::Terminator, bb.terminator, Call, te,
+ assigned_lvalue(te.ret_block, 0, te.ret_val);
+ )
+ }
+
+ // Dump out variable lifetimes.
+ if( dump_debug )
+ {
+ for(unsigned int i = 0; i < temporary_lifetimes.size(); i ++)
+ {
+ temporary_lifetimes[i].dump_debug("tmp", i, block_offsets);
+ }
+ for(unsigned int i = 0; i < variable_lifetimes.size(); i ++)
+ {
+ variable_lifetimes[i].dump_debug("var", i, block_offsets);
+ }
+ }
+
+
+ ::MIR::ValueLifetimes rv;
+ rv.m_block_offsets = mv$(block_offsets);
+ rv.m_temporaries.reserve( temporary_lifetimes.size() );
+ for(auto& lft : temporary_lifetimes)
+ rv.m_temporaries.push_back( ::MIR::ValueLifetime(mv$(lft.stmt_bitmap)) );
+ rv.m_variables.reserve( variable_lifetimes.size() );
+ for(auto& lft : variable_lifetimes)
+ rv.m_variables.push_back( ::MIR::ValueLifetime(mv$(lft.stmt_bitmap)) );
+ return rv;
+}
+void MIR_Helper_GetLifetimes_DetermineValueLifetime(
+ ::MIR::TypeResolve& mir_res, const ::MIR::Function& fcn,
+ size_t bb_idx, size_t stmt_idx, // First statement in which the value is valid (after the assignment)
+ const ::MIR::LValue& lv, const ::std::vector<size_t>& block_offsets, ValueLifetime& vl
+ )
+{
+ TRACE_FUNCTION_F(mir_res << " " << lv);
+ // Walk the BB tree until:
+ // - Loopback
+ // - Assignment
+ // - Drop
+
+ struct State
+ {
+ const ::std::vector<size_t>& m_block_offsets;
+ ValueLifetime& m_out_vl;
+
+ ::std::vector<unsigned int> bb_history;
+ size_t last_read_ofs; // Statement index
+ bool m_is_borrowed;
+
+ State(const ::std::vector<size_t>& block_offsets, ValueLifetime& vl, size_t init_bb_idx, size_t init_stmt_idx):
+ m_block_offsets(block_offsets),
+ m_out_vl(vl),
+ bb_history(),
+ last_read_ofs(init_stmt_idx),
+ m_is_borrowed(false)
+ {
+ bb_history.push_back(init_bb_idx);
+ }
+ State(State&& x):
+ m_block_offsets(x.m_block_offsets),
+ m_out_vl(x.m_out_vl),
+ bb_history( mv$(x.bb_history) ),
+ last_read_ofs( x.last_read_ofs ),
+ m_is_borrowed( x.m_is_borrowed )
+ {
+ }
+ State& operator=(State&& x) {
+ this->bb_history = mv$(x.bb_history);
+ this->last_read_ofs = x.last_read_ofs;
+ this->m_is_borrowed = x.m_is_borrowed;
+ return *this;
+ }
+
+ State clone() const {
+ State rv { m_block_offsets, m_out_vl, 0, last_read_ofs };
+ rv.bb_history = bb_history;
+ rv.m_is_borrowed = m_is_borrowed;
+ return rv;
+ }
+
+ // Returns true if the variable has been borrowed
+ bool is_borrowed() const {
+ return this->m_is_borrowed;
+ }
+
+ void mark_borrowed(size_t stmt_idx) {
+ if( ! m_is_borrowed )
+ {
+ m_is_borrowed = false;
+ this->fill_to(stmt_idx);
+ }
+ m_is_borrowed = true;
+ }
+ void mark_read(size_t stmt_idx) {
+ if( !m_is_borrowed )
+ {
+ this->fill_to(stmt_idx);
+ }
+ else
+ {
+ m_is_borrowed = false;
+ this->fill_to(stmt_idx);
+ m_is_borrowed = true;
+ }
+ }
+ void fmt(::std::ostream& os) const {
+ os << "BB" << bb_history.front() << "/" << last_read_ofs << "--";
+ os << "[" << bb_history << "]";
+ }
+ void finalise(size_t stmt_idx)
+ {
+ if( m_is_borrowed )
+ {
+ m_is_borrowed = false;
+ this->fill_to(stmt_idx);
+ m_is_borrowed = true;
+ }
+ }
+ private:
+ void fill_to(size_t stmt_idx)
+ {
+ TRACE_FUNCTION_F(FMT_CB(ss, this->fmt(ss);));
+ assert( !m_is_borrowed );
+ assert(bb_history.size() > 0);
+ if( bb_history.size() == 1 )
+ {
+ // only one block
+ m_out_vl.fill(m_block_offsets, bb_history[0], last_read_ofs, stmt_idx);
+ }
+ else
+ {
+ // First block.
+ auto init_bb_idx = bb_history[0];
+ auto limit_0 = m_block_offsets[init_bb_idx+1] - m_block_offsets[init_bb_idx] - 1;
+ m_out_vl.fill(m_block_offsets, init_bb_idx, last_read_ofs, limit_0);
+
+ // Middle blocks
+ for(size_t i = 1; i < bb_history.size()-1; i++)
+ {
+ size_t bb_idx = bb_history[i];
+ assert(bb_idx+1 < m_block_offsets.size());
+ size_t limit = m_block_offsets[bb_idx+1] - m_block_offsets[bb_idx] - 1;
+ m_out_vl.fill(m_block_offsets, bb_idx, 0, limit);
+ }
+
+ // Last block
+ auto bb_idx = bb_history.back();
+ m_out_vl.fill(m_block_offsets, bb_idx, 0, stmt_idx);
+ }
+
+ last_read_ofs = stmt_idx;
+
+ auto cur = this->bb_history.back();
+ this->bb_history.clear();
+ this->bb_history.push_back(cur);
+ }
+ };
+
+ struct Runner
+ {
+ ::MIR::TypeResolve& m_mir_res;
+ const ::MIR::Function& m_fcn;
+ size_t m_init_bb_idx;
+ size_t m_init_stmt_idx;
+ const ::MIR::LValue& m_lv;
+ const ::std::vector<size_t>& m_block_offsets;
+ ValueLifetime& m_lifetimes;
+ bool m_is_copy;
+
+ ::std::vector<bool> m_visited_statements;
+
+ ::std::vector<::std::pair<size_t, State>> m_states_to_do;
+
+ Runner(::MIR::TypeResolve& mir_res, const ::MIR::Function& fcn, size_t init_bb_idx, size_t init_stmt_idx, const ::MIR::LValue& lv, const ::std::vector<size_t>& block_offsets, ValueLifetime& vl):
+ m_mir_res(mir_res),
+ m_fcn(fcn),
+ m_init_bb_idx(init_bb_idx),
+ m_init_stmt_idx(init_stmt_idx),
+ m_lv(lv),
+ m_block_offsets(block_offsets),
+ m_lifetimes(vl),
+
+ m_visited_statements( m_lifetimes.stmt_bitmap.size() )
+ {
+ ::HIR::TypeRef tmp;
+ m_is_copy = m_mir_res.m_resolve.type_is_copy(mir_res.sp, m_mir_res.get_lvalue_type(tmp, lv));
+ }
+
+ void run_block(size_t bb_idx, size_t stmt_idx, State state)
+ {
+ const auto& bb = m_fcn.blocks.at(bb_idx);
+ assert(stmt_idx <= bb.statements.size());
+
+ bool was_moved = false;
+ bool was_updated = false;
+ auto visit_cb = [&](const auto& lv, auto vu) {
+ if(lv == m_lv) {
+ if( vu == ValUsage::Read ) {
+ DEBUG(m_mir_res << "Used");
+ state.mark_read(stmt_idx);
+ was_updated = true;
+ }
+ if( vu == ValUsage::Move ) {
+ DEBUG(m_mir_res << (m_is_copy ? "Read" : "Moved"));
+ state.mark_read(stmt_idx);
+ was_moved = ! m_is_copy;
+ }
+ if( vu == ValUsage::Borrow ) {
+ DEBUG(m_mir_res << "Borrowed");
+ state.mark_borrowed(stmt_idx);
+ was_updated = true;
+ }
+ return true;
+ }
+ return false;
+ };
+
+ for( ; stmt_idx < bb.statements.size(); stmt_idx ++)
+ {
+ const auto& stmt = bb.statements[stmt_idx];
+ m_mir_res.set_cur_stmt(bb_idx, stmt_idx);
+ m_visited_statements[ m_block_offsets.at(bb_idx) + stmt_idx ] = true;
+
+ // Visit and see if the value is read (setting the read flag or end depending on if the value is Copy)
+ visit_mir_lvalues(stmt, visit_cb);
+
+ if( was_moved )
+ {
+ // Moved: Update read position and apply
+ DEBUG(m_mir_res << "Moved, return");
+ state.mark_read(stmt_idx);
+ state.finalise(stmt_idx);
+ return ;
+ }
+
+ TU_MATCHA( (stmt), (se),
+ (Assign,
+ if( se.dst == m_lv )
+ {
+ DEBUG(m_mir_res << "- Assigned to, return");
+ // Value assigned, just apply
+ state.finalise(stmt_idx);
+ return ;
+ }
+ ),
+ (Drop,
+ visit_mir_lvalue(se.slot, ValUsage::Read, visit_cb);
+ if( se.slot == m_lv )
+ {
+ // Value dropped, update read position and apply
+ DEBUG(m_mir_res << "- Dropped, return");
+ // - If it was borrowed, it can't still be borrowed here.
+ // TODO: Enable this once it's known to not cause mis-optimisation. It could currently.
+ //if( state.is_borrowed() ) {
+ // state.clear_borrowed();
+ //}
+ state.mark_read(stmt_idx);
+ state.finalise(stmt_idx);
+ return ;
+ }
+ ),
+ (Asm,
+ //
+ for(const auto& e : se.outputs)
+ {
+ if(e.second == m_lv) {
+ // Assigned, just apply
+ DEBUG(m_mir_res << "- Assigned (asm!), return");
+ state.finalise(stmt_idx);
+ return ;
+ }
+ }
+ ),
+ (SetDropFlag,
+ // Ignore
+ ),
+ (ScopeEnd,
+ // Ignore
+ )
+ )
+ }
+ m_mir_res.set_cur_stmt_term(bb_idx);
+ m_visited_statements[ m_block_offsets.at(bb_idx) + stmt_idx ] = true;
+
+ visit_mir_lvalues(bb.terminator, visit_cb);
+
+ if( was_moved )
+ {
+ // Moved: Update read position and apply
+ DEBUG(m_mir_res << "- Moved, return");
+ state.mark_read(stmt_idx);
+ state.finalise(stmt_idx);
+ return ;
+ }
+
+ // Terminator
+ TU_MATCHA( (bb.terminator), (te),
+ (Incomplete,
+ // TODO: Isn't this a bug?
+ DEBUG(m_mir_res << "Incomplete");
+ state.finalise(stmt_idx);
+ ),
+ (Return,
+ DEBUG(m_mir_res << "Return");
+ state.finalise(stmt_idx);
+ ),
+ (Diverge,
+ DEBUG(m_mir_res << "Diverge");
+ state.finalise(stmt_idx);
+ ),
+ (Goto,
+ m_states_to_do.push_back( ::std::make_pair(te, mv$(state)) );
+ ),
+ (Panic,
+ m_states_to_do.push_back( ::std::make_pair(te.dst, mv$(state)) );
+ ),
+ (If,
+ m_states_to_do.push_back( ::std::make_pair(te.bb0, state.clone()) );
+ m_states_to_do.push_back( ::std::make_pair(te.bb1, mv$(state)) );
+ ),
+ (Switch,
+ for(size_t i = 0; i < te.targets.size(); i ++)
+ {
+ auto s = (i == te.targets.size()-1)
+ ? mv$(state)
+ : state.clone();
+ m_states_to_do.push_back( ::std::make_pair(te.targets[i], mv$(s)) );
+ }
+ ),
+ (Call,
+ if( te.ret_val == m_lv )
+ {
+ DEBUG(m_mir_res << "Assigned (Call), return");
+ // Value assigned, just apply
+ state.finalise(stmt_idx);
+ return ;
+ }
+ if( m_fcn.blocks.at(te.panic_block).statements.size() == 0 && m_fcn.blocks.at(te.panic_block).terminator.is_Diverge() ) {
+ // Shortcut: Don't create a new state if the panic target is Diverge
+ }
+ else {
+ m_states_to_do.push_back( ::std::make_pair(te.panic_block, state.clone()) );
+ }
+ m_states_to_do.push_back( ::std::make_pair(te.ret_block, mv$(state)) );
+ )
+ )
+ }
+ };
+
+ ::std::vector<bool> use_bitmap(vl.stmt_bitmap.size()); // Bitmap of locations where this value is used.
+ {
+ size_t pos = 0;
+ for(const auto& bb : fcn.blocks)
+ {
+ for(const auto& stmt : bb.statements)
+ {
+ use_bitmap[pos] = visit_mir_lvalues(stmt, [&](const ::MIR::LValue& tlv, auto vu){ return tlv == lv && vu != ValUsage::Write; });
+ pos ++;
+ }
+ use_bitmap[pos] = visit_mir_lvalues(bb.terminator, [&](const ::MIR::LValue& tlv, auto vu){ return tlv == lv && vu != ValUsage::Write; });
+ pos ++;
+ }
+ }
+
+ Runner runner(mir_res, fcn, bb_idx, stmt_idx, lv, block_offsets, vl);
+ ::std::vector< ::std::pair<size_t,State>> post_check_list;
+
+ // TODO: Have a bitmap of visited statements. If a visted statement is hit, stop the current state
+ // - Use the same rules as loopback.
+
+ // Fill the first statement, to ensure that there is at least one bit set.
+ runner.run_block(bb_idx, stmt_idx, State(block_offsets, vl, bb_idx, stmt_idx));
+
+ while( ! runner.m_states_to_do.empty() )
+ {
+ auto bb_idx = runner.m_states_to_do.back().first;
+ auto state = mv$(runner.m_states_to_do.back().second);
+ runner.m_states_to_do.pop_back();
+
+ DEBUG("state.bb_history=[" << state.bb_history << "], -> BB" << bb_idx);
+ state.bb_history.push_back(bb_idx);
+
+ if( runner.m_visited_statements.at( block_offsets.at(bb_idx) + 0 ) )
+ {
+ if( vl.stmt_bitmap.at( block_offsets.at(bb_idx) + 0) )
+ {
+ DEBUG("Looped (to already valid)");
+ state.mark_read(0);
+ state.finalise(0);
+ continue ;
+ }
+ else if( state.is_borrowed() )
+ {
+ DEBUG("Looped (borrowed)");
+ state.mark_read(0);
+ state.finalise(0);
+ continue ;
+ }
+ else
+ {
+ // Put this state elsewhere and check if the variable is known valid at that point.
+ DEBUG("Looped (after last read), push for later");
+ post_check_list.push_back( ::std::make_pair(bb_idx, mv$(state)) );
+ continue ;
+ }
+ }
+
+#if 0
+ // TODO: Have a bitmap of if a BB mentions this value. If there are no unvisited BBs that mention this value, stop early.
+ // - CATCH: The original BB contains a reference, but might not have been visited (if it was the terminating call that triggered)
+ // - Also, we don't want to give up early (if we loop back to the start of the first block)
+ // - A per-statement bitmap would solve this. Return early if `!vl.stmt_bitmap & usage_stmt_bitmap == 0`
+ // > Requires filling the bitmap as we go (for maximum efficiency)
+ {
+ bool found_non_visited = false;
+ for(size_t i = 0; i < use_bitmap.size(); i ++)
+ {
+ // If a place where the value is used is not present in the output bitmap
+ if( !vl.stmt_bitmap[i] && use_bitmap[i] )
+ {
+ DEBUG("- Still used at +" << i);
+ found_non_visited = true;
+ }
+ }
+ // If there were no uses of the variable that aren't covered by the lifetime bitmap
+ if( ! found_non_visited )
+ {
+ // Terminate early
+ DEBUG("Early terminate - All possible lifetimes covered");
+ state.finalise(0);
+ for(auto& s : runner.m_states_to_do)
+ {
+ s.second.bb_history.push_back(bb_idx);
+ s.second.finalise(0);
+ }
+ return ;
+ }
+ }
+#endif
+
+ // Special case for when doing multiple runs on the same output
+ if( vl.stmt_bitmap.at( block_offsets.at(bb_idx) + 0) )
+ {
+ DEBUG("Already valid in BB" << bb_idx);
+ state.mark_read(0);
+ state.finalise(0);
+ continue;
+ }
+#if 0
+ // TODO: Have a way of knowing if a state will never find a use (the negative of the above)
+ // - Requires knowing for sure that a BB doesn't end up using the value.
+ // - IDEA: Store a fork count and counts of Yes/No for each BB.
+ // > If ForkCount == No, the value isn't used in that branch.
+ if( runner.m_bb_counts[bb_idx].visit_count > 0
+ && runner.m_bb_counts[bb_idx].visit_count == runner.m_bb_counts[bb_idx].val_unused_count )
+ {
+ DEBUG("Target BB known to be not valid");
+ runner.apply_state(state, 0);
+ continue ;
+ }
+ runner.m_bb_counts[bb_idx].visit_count ++;
+#endif
+
+ runner.run_block(bb_idx, 0, mv$(state));
+ }
+
+ // Iterate while there are items in the post_check list
+ while( !post_check_list.empty() )
+ {
+ bool change = false;
+ for(auto it = post_check_list.begin(); it != post_check_list.end(); )
+ {
+ auto bb_idx = it->first;
+ auto& state = it->second;
+ // If the target of this loopback is valid, then the entire route to the loopback must have been valid
+ if( vl.stmt_bitmap.at( block_offsets.at(bb_idx) + 0) )
+ {
+ change = true;
+ DEBUG("Looped (now valid)");
+ state.mark_read(0);
+ state.finalise(0);
+
+ it = post_check_list.erase(it);
+ }
+ else
+ {
+ ++ it;
+ }
+ }
+ // Keep going while changes happen
+ if( !change )
+ break;
+ }
+}
+
+#else
+
+::MIR::ValueLifetimes MIR_Helper_GetLifetimes(::MIR::TypeResolve& state, const ::MIR::Function& fcn, bool dump_debug)
+{
+ TRACE_FUNCTION_F(state);
+ // New algorithm notes:
+ // ---
+ // The lifetime of a value starts when it is written, and ends the last time it is read
+ // - When a variable is read, end any existing lifetime and start a new one.
+ // - When the value is read, update the end of its lifetime.
+ // ---
+ // A lifetime is a range in the call graph (with a start and end, including list of blocks)
+ // - Representation: Bitmap with a bit per statement.
+ // - Record the current block path in general state, along with known active lifetimes
+
+ // TODO: If a value is borrowed, assume it lives forevermore
+ // - Ideally there would be borrow tracking to determine its actual required lifetime.
+ // - NOTE: This doesn't impact the borrows themselves, just the borrowee
+
+ // TODO: Add a statement type StorageDead (or similar?) that indicates the point where a values scope ends
+
+ // Scan through all possible paths in the graph (with loopback detection using a memory of the path)
+ // - If a loop is detected, determine if there were changes to the lifetime set during that pass
+ // > Changes are noticed by recording in the state structure when it triggers a change in the lifetime
+ // map.
+ struct Position
+ {
+ size_t path_index = 0; // index into the block path.
+ unsigned int stmt_idx = 0;
+
+ bool operator==(const Position& x) const {
+ return path_index == x.path_index && stmt_idx == x.stmt_idx;
+ }
+ };
+ struct ProtoLifetime
+ {
+ Position start;
+ Position end;
+
+ bool is_empty() const {
+ return start == end;
+ }
+ bool is_borrowed() const {
+ return this->end == Position { ~0u, ~0u };
+ }
+ };
+ static unsigned NEXT_INDEX = 0;
+ struct State
+ {
+ unsigned int index = 0;
+ ::std::vector<unsigned int> block_path;
+ ::std::vector<unsigned int> block_change_idx;
+ unsigned int cur_change_idx = 0;
+
+ // if read, update. If set, save and update
+ ::std::vector<ProtoLifetime> tmp_ends;
+ ::std::vector<ProtoLifetime> var_ends;
+
+ State(const ::MIR::Function& fcn):
+ tmp_ends( fcn.temporaries.size(), ProtoLifetime() ),
+ var_ends( fcn.named_variables.size(), ProtoLifetime() )
+ {
+ }
+
+ State clone() const {
+ auto rv = *this;
+ rv.index = ++NEXT_INDEX;
+ return rv;
+ }
+ };
+ NEXT_INDEX = 0;
+
+ size_t statement_count = 0;
+ ::std::vector<size_t> block_offsets;
+ block_offsets.reserve( fcn.blocks.size() );
+ for(const auto& bb : fcn.blocks)
+ {
+ block_offsets.push_back(statement_count);
+ statement_count += bb.statements.size() + 1; // +1 for the terminator
+ }
+
+ ::std::vector<ValueLifetime> temporary_lifetimes( fcn.temporaries.size(), ValueLifetime(statement_count) );
+ ::std::vector<ValueLifetime> variable_lifetimes( fcn.named_variables.size(), ValueLifetime(statement_count) );
+
+ struct BlockSeenLifetimes {
+ bool m_has_state = false;
+ const ::std::vector<size_t>& block_offsets;
+ ::std::vector< ::std::vector<unsigned int> > tmp;
+ ::std::vector< ::std::vector<unsigned int> > var;
+
+ BlockSeenLifetimes(const ::std::vector<size_t>& block_offsets, const ::MIR::Function& fcn):
+ block_offsets( block_offsets ),
+ tmp( fcn.temporaries.size() ),
+ var( fcn.named_variables.size() )
+ {}
+
+ bool has_state() const
+ {
+ return m_has_state;
+ }
+
+ bool try_merge(const State& val_state) const
+ {
+ // TODO: This logic isn't quite correct. Just becase a value's existing end is already marked as valid,
+ // doesn't mean that we have no new information.
+ // - Wait, doesn't it?
+ auto try_merge_lft = [&](const ProtoLifetime& lft, const ::std::vector<unsigned int>& seen)->bool {
+ if(lft.is_empty()) return false;
+ // TODO: What should be done for borrow flagged values
+ if(lft.is_borrowed()) return false;
+ auto end_idx = block_offsets.at( val_state.block_path.at(lft.end.path_index) ) + lft.end.stmt_idx;
+
+ auto it = ::std::find(seen.begin(), seen.end(), end_idx);
+ return (it == seen.end());
+ };
+ for(size_t i = 0; i < val_state.tmp_ends.size(); i++)
+ {
+ if( try_merge_lft(val_state.tmp_ends[i], this->tmp[i]) )
+ return true;
+ }
+ for(size_t i = 0; i < val_state.var_ends.size(); i++)
+ {
+ if( try_merge_lft(val_state.var_ends[i], this->var[i]) )
+ return true;
+ }
+ return false;
+ }
+
+ bool merge(const State& val_state)
+ {
+ bool rv = false;
+ auto merge_lft = [&](const ProtoLifetime& lft, ::std::vector<unsigned int>& seen)->bool {
+ if(lft.is_empty()) return false;
+ // TODO: What should be done for borrow flagged values
+ if(lft.end == Position { ~0u, ~0u }) return false;
+ auto end_idx = block_offsets.at( val_state.block_path.at(lft.end.path_index) ) + lft.end.stmt_idx;
+
+ auto it = ::std::find(seen.begin(), seen.end(), end_idx);
+ if( it == seen.end() )
+ {
+ seen.push_back( end_idx );
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ };
+ for(size_t i = 0; i < val_state.tmp_ends.size(); i++)
+ {
+ rv |= merge_lft(val_state.tmp_ends[i], this->tmp[i]);
+ }
+ for(size_t i = 0; i < val_state.var_ends.size(); i++)
+ {
+ rv |= merge_lft(val_state.var_ends[i], this->var[i]);
+ }
+ m_has_state = true;
+ return rv;
+ }
+ };
+ ::std::vector<BlockSeenLifetimes> block_seen_lifetimes( fcn.blocks.size(), BlockSeenLifetimes(block_offsets, fcn) );
+
+ State init_state(fcn);
+
+ ::std::vector<::std::pair<unsigned int, State>> todo_queue;
+ todo_queue.push_back(::std::make_pair( 0, mv$(init_state) ));
+
+ while(!todo_queue.empty())
+ {
+ auto bb_idx = todo_queue.back().first;
+ auto val_state = mv$(todo_queue.back().second);
+ todo_queue.pop_back();
+ state.set_cur_stmt(bb_idx, 0);
+
+ // Fill alive time in the bitmap
+ // TODO: Maybe also store the range (as a sequence of {block,start,end})
+ auto add_lifetime_s = [&](State& val_state, const ::MIR::LValue& lv, const Position& start, const Position& end) {
+ assert(start.path_index <= end.path_index);
+ assert(start.path_index < end.path_index || start.stmt_idx <= end.stmt_idx);
+ if(start.path_index == end.path_index && start.stmt_idx == end.stmt_idx)
+ return;
+ DEBUG("[add_lifetime] " << lv << " (" << start.path_index << "," << start.stmt_idx << ") -- (" << end.path_index << "," << end.stmt_idx << ")");
+ ValueLifetime* lft;
+ if(const auto* e = lv.opt_Temporary())
+ {
+ lft = &temporary_lifetimes[e->idx];
+ }
+ else if(const auto* e = lv.opt_Variable())
+ {
+ lft = &variable_lifetimes[*e];
+ }
+ else
+ {
+ MIR_TODO(state, "[add_lifetime] " << lv);
+ return;
+ }
+
+ // Fill lifetime map for this temporary in the indicated range
+ bool did_set = false;
+ unsigned int j = start.stmt_idx;
+ unsigned int i = start.path_index;
+ while( i <= end.path_index && i < val_state.block_path.size() )
+ {
+ auto bb_idx = val_state.block_path.at(i);
+ const auto& bb = fcn.blocks[bb_idx];
+ MIR_ASSERT(state, j <= bb.statements.size(), "");
+ MIR_ASSERT(state, bb_idx < block_offsets.size(), "");
+
+ auto block_base = block_offsets.at(bb_idx);
+ auto idx = block_base + j;
+ if( !lft->stmt_bitmap.at(idx) )
+ {
+ lft->stmt_bitmap[idx] = true;
+ did_set = true;
+ }
+
+ if( i == end.path_index && j == (end.stmt_idx != ~0u ? end.stmt_idx : bb.statements.size()) )
+ break;
+
+ // If the current index is the terminator (one after the size)
+ if(j == bb.statements.size())
+ {
+ j = 0;
+ i++;
+ }
+ else
+ {
+ j ++;
+ }
+ }
+
+ // - If the above set a new bit, increment `val_state.cur_change_idx`
+ if( did_set )
+ {
+ DEBUG("[add_lifetime] " << lv << " (" << start.path_index << "," << start.stmt_idx << ") -- (" << end.path_index << "," << end.stmt_idx << ") - New information");
+ val_state.cur_change_idx += 1;
+ }
+ };
+ auto add_lifetime = [&](const ::MIR::LValue& lv, const Position& start, const Position& end) {
+ add_lifetime_s(val_state, lv, start, end);
+ };
+
+ auto apply_state = [&](State& state) {
+ // Apply all changes in this state, just in case there was new information
+ for(unsigned i = 0; i < fcn.temporaries.size(); i++)
+ add_lifetime_s( state, ::MIR::LValue::make_Temporary({i}), state.tmp_ends[i].start, state.tmp_ends[i].end );
+ for(unsigned i = 0; i < fcn.named_variables.size(); i++)
+ add_lifetime_s( state, ::MIR::LValue::make_Variable({i}), state.var_ends[i].start, state.var_ends[i].end );
+ };
+ auto add_to_visit = [&](unsigned int new_bb_idx, State new_state) {
+ auto& bb_memory_ent = block_seen_lifetimes[new_bb_idx];
+ if( !bb_memory_ent.has_state() )
+ {
+ // No recorded state, needs to be visited
+ DEBUG(state << " state" << new_state.index << " -> bb" << new_bb_idx << " (no existing state)");
+ }
+ else if( bb_memory_ent.try_merge(new_state) )
+ {
+ // This state has new information, needs to be visited
+ DEBUG(state << " state" << new_state.index << " -> bb" << new_bb_idx << " (new info)");
+ }
+ else
+ {
+ // Skip
+ // TODO: Acquire from the target block the actual end of any active lifetimes, then apply them.
+ DEBUG(state << " state" << new_state.index << " -> bb" << new_bb_idx << " - No new state, no push");
+ // - For all variables currently active, check if they're valid in the first statement of the target block.
+ // - If so, mark as valid at the end of the current block
+ auto bm_idx = block_offsets[new_bb_idx];
+ Position cur_pos;
+ cur_pos.path_index = val_state.block_path.size() - 1;
+ cur_pos.stmt_idx = fcn.blocks[bb_idx].statements.size();
+ for(unsigned i = 0; i < fcn.temporaries.size(); i++) {
+ if( ! new_state.tmp_ends[i].is_empty() && temporary_lifetimes[i].stmt_bitmap[bm_idx] ) {
+ DEBUG("- tmp$" << i << " - Active in target, assume active");
+ new_state.tmp_ends[i].end = cur_pos;
+ }
+ }
+ for(unsigned i = 0; i < fcn.named_variables.size(); i++) {
+ if( ! new_state.var_ends[i].is_empty() && variable_lifetimes[i].stmt_bitmap[bm_idx] ) {
+ DEBUG("- var$" << i << " - Active in target, assume active");
+ new_state.var_ends[i].end = cur_pos;
+ }
+ }
+ // - Apply whatever state was still active
+ apply_state(new_state);
+ return ;
+ }
+ todo_queue.push_back(::std::make_pair( new_bb_idx, mv$(new_state) ));
+ };
+
+ // Compare this state to a composite list of lifetimes seen in this block
+ // - Just compares the end of each proto lifetime
+ {
+ auto& bb_memory_ent = block_seen_lifetimes[bb_idx];
+ bool had_state = bb_memory_ent.has_state();
+ bool has_new = bb_memory_ent.merge(val_state);
+
+ if( !has_new && had_state )
+ {
+ DEBUG(state << " state" << val_state.index << " - No new entry state");
+ apply_state(val_state);
+
+ continue ;
+ }
+ }
+
+ // Check if this state has visited this block before, and if anything changed since last time
+ {
+ auto it = ::std::find(val_state.block_path.rbegin(), val_state.block_path.rend(), bb_idx);
+ if( it != val_state.block_path.rend() )
+ {
+ auto idx = &*it - &val_state.block_path.front();
+ if( val_state.block_change_idx[idx] == val_state.cur_change_idx )
+ {
+ DEBUG(state << " " << val_state.index << " Loop and no change");
+ continue ;
+ }
+ else
+ {
+ assert( val_state.block_change_idx[idx] < val_state.cur_change_idx );
+ DEBUG(state << " " << val_state.index << " --- Loop, " << val_state.cur_change_idx - val_state.block_change_idx[idx] << " changes");
+ }
+ }
+ else
+ {
+ DEBUG(state << " " << val_state.index << " ---");
+ }
+ val_state.block_path.push_back(bb_idx);
+ val_state.block_change_idx.push_back( val_state.cur_change_idx );
+ }
+
+ Position cur_pos;
+ cur_pos.path_index = val_state.block_path.size() - 1;
+ cur_pos.stmt_idx = 0;
+ auto lvalue_read = [&](const ::MIR::LValue& lv) {
+ ProtoLifetime* slot;
+ if(const auto* e = lv.opt_Temporary()) {
+ slot = &val_state.tmp_ends.at(e->idx);
+ }
+ else if(const auto* e = lv.opt_Variable()) {
+ slot = &val_state.var_ends.at(*e);
+ }
+ else {
+ return ;
+ }
+ // Update the last read location
+ //DEBUG("Update END " << lv << " to " << cur_pos);
+ slot->end = cur_pos;
+ };
+ auto lvalue_set = [&](const ::MIR::LValue& lv) {
+ ProtoLifetime* slot;
+ if(const auto* e = lv.opt_Temporary()) {
+ slot = &val_state.tmp_ends.at(e->idx);
+ }
+ else if(const auto* e = lv.opt_Variable()) {
+ slot = &val_state.var_ends.at(*e);
+ }
+ else {
+ return ;
+ }
+ // End whatever value was originally there, and insert this new one
+ slot->end = cur_pos;
+ add_lifetime(lv, slot->start, slot->end);
+ slot->start = cur_pos;
+ };
+ auto lvalue_borrow = [&](const ::MIR::LValue& lv) {
+ ProtoLifetime* slot;
+ if(const auto* e = lv.opt_Temporary()) {
+ slot = &val_state.tmp_ends.at(e->idx);
+ }
+ else if(const auto* e = lv.opt_Variable()) {
+ slot = &val_state.var_ends.at(*e);
+ }
+ else {
+ return ;
+ }
+ // TODO: Flag this value as currently being borrowed (a flag that never clears)
+ slot->end = Position { ~0u, ~0u };
+ };
+ auto visit_lval_cb = [&](const auto& lv, ValUsage vu)->bool{
+ if(vu == ValUsage::Read)
+ lvalue_read(lv);
+ if(vu == ValUsage::Borrow)
+ lvalue_borrow(lv);
+ if(vu == ValUsage::Write)
+ lvalue_set(lv);
+ return false;
+ };
+
+ // Run statements
+ for(const auto& stmt : fcn.blocks[bb_idx].statements)
+ {
+ auto stmt_idx = &stmt - &fcn.blocks[bb_idx].statements.front();
+ cur_pos.stmt_idx = stmt_idx;
+ state.set_cur_stmt(bb_idx, stmt_idx);
+ DEBUG(state << " " << stmt);
+
+ if( const auto* e = stmt.opt_Drop() )
+ {
+ visit_mir_lvalues(stmt, [&](const auto& lv, ValUsage vu)->bool{
+ if(vu == ValUsage::Read)
+ lvalue_read(lv);
+ return false;
+ });
+ lvalue_read(e->slot);
+ lvalue_set(e->slot);
+ }
+ else
+ {
+ visit_mir_lvalues(stmt, visit_lval_cb);
+ }
+ }
+ cur_pos.stmt_idx = fcn.blocks[bb_idx].statements.size();
+
+ state.set_cur_stmt_term(bb_idx);
+ DEBUG(state << "TERM " << fcn.blocks[bb_idx].terminator);
+ TU_MATCH(::MIR::Terminator, (fcn.blocks[bb_idx].terminator), (e),
+ (Incomplete,
+ // Should be impossible here.
+ ),
+ (Return,
+ // End all active lifetimes at their previous location.
+ apply_state(val_state);
+ ),
+ (Diverge,
+ apply_state(val_state);
+ ),
+ (Goto,
+ add_to_visit(e, mv$(val_state));
+ ),
+ (Panic,
+ // What should be done here?
+ ),
+ (If,
+ visit_mir_lvalue(e.cond, ValUsage::Read, visit_lval_cb);
+
+ // Push blocks
+ add_to_visit(e.bb0, val_state.clone());
+ add_to_visit(e.bb1, mv$(val_state));
+ ),
+ (Switch,
+ visit_mir_lvalue(e.val, ValUsage::Read, visit_lval_cb);
+ ::std::set<unsigned int> tgts;
+ for(const auto& tgt : e.targets)
+ tgts.insert(tgt);
+
+ for(const auto& tgt : tgts)
+ {
+ auto vs = (tgt == *tgts.rbegin() ? mv$(val_state) : val_state.clone());
+ add_to_visit(tgt, mv$(vs));
+ }
+ ),
+ (Call,
+ if( const auto* f = e.fcn.opt_Value() )
+ visit_mir_lvalue(*f, ValUsage::Read, visit_lval_cb);
+ for(const auto& arg : e.args)
+ if( const auto* e = arg.opt_LValue() )
+ visit_mir_lvalue(*e, ValUsage::Read, visit_lval_cb);
+
+ // Push blocks (with return valid only in one)
+ add_to_visit(e.panic_block, val_state.clone());
+
+ // TODO: If the function returns !, don't follow the ret_block
+ lvalue_set(e.ret_val);
+ add_to_visit(e.ret_block, mv$(val_state));
+ )
+ )
+ }
+
+ // Dump out variable lifetimes.
+ if( dump_debug )
+ {
+ for(unsigned int i = 0; i < temporary_lifetimes.size(); i ++)
+ {
+ temporary_lifetimes[i].dump_debug("tmp", i, block_offsets);
+ }
+ for(unsigned int i = 0; i < variable_lifetimes.size(); i ++)
+ {
+ variable_lifetimes[i].dump_debug("var", i, block_offsets);
+ }
+ }
+
+ // Move lifetime bitmaps into the variable for the below code
+ ::MIR::ValueLifetimes rv;
+ rv.m_block_offsets = mv$(block_offsets);
+ rv.m_temporaries.reserve( temporary_lifetimes.size() );
+ for(auto& lft : temporary_lifetimes)
+ rv.m_temporaries.push_back( ::MIR::ValueLifetime(mv$(lft.stmt_bitmap)) );
+ rv.m_variables.reserve( variable_lifetimes.size() );
+ for(auto& lft : variable_lifetimes)
+ rv.m_variables.push_back( ::MIR::ValueLifetime(mv$(lft.stmt_bitmap)) );
+
+ return rv;
+}
+#endif
diff --git a/src/mir/helpers.hpp b/src/mir/helpers.hpp
index c91e4198..802ce88f 100644
--- a/src/mir/helpers.hpp
+++ b/src/mir/helpers.hpp
@@ -23,6 +23,10 @@ class Function;
class LValue;
class Constant;
struct BasicBlock;
+class Terminator;
+class Statement;
+class RValue;
+class Param;
typedef unsigned int BasicBlockId;
@@ -78,6 +82,7 @@ public:
this->bb_idx = bb_idx;
this->stmt_idx = stmt_idx;
}
+ unsigned int get_cur_stmt_ofs() const;
void set_cur_stmt_term(unsigned int bb_idx) {
this->bb_idx = bb_idx;
this->stmt_idx = STMT_TERM;
@@ -107,4 +112,78 @@ public:
}
};
+
+// --------------------------------------------------------------------
+// MIR_Helper_GetLifetimes
+// --------------------------------------------------------------------
+class ValueLifetime
+{
+ ::std::vector<bool> statements;
+
+public:
+ ValueLifetime(::std::vector<bool> stmts):
+ statements( mv$(stmts) )
+ {}
+
+ bool valid_at(size_t ofs) const {
+ return statements.at(ofs);
+ }
+
+ // true if this value is used at any point
+ bool is_used() const {
+ for(auto v : statements)
+ if( v )
+ return true;
+ return false;
+ }
+ bool overlaps(const ValueLifetime& x) const {
+ assert(statements.size() == x.statements.size());
+ for(unsigned int i = 0; i < statements.size(); i ++)
+ {
+ if( statements[i] && x.statements[i] )
+ return true;
+ }
+ return false;
+ }
+ void unify(const ValueLifetime& x) {
+ assert(statements.size() == x.statements.size());
+ for(unsigned int i = 0; i < statements.size(); i ++)
+ {
+ if( x.statements[i] )
+ statements[i] = true;
+ }
+ }
+};
+
+struct ValueLifetimes
+{
+ ::std::vector<size_t> m_block_offsets;
+ ::std::vector<ValueLifetime> m_temporaries;
+ ::std::vector<ValueLifetime> m_variables;
+
+ bool var_valid(unsigned var_idx, unsigned bb_idx, unsigned stmt_idx) const {
+ return m_variables.at(var_idx).valid_at( m_block_offsets[bb_idx] + stmt_idx );
+ }
+ bool tmp_valid(unsigned tmp_idx, unsigned bb_idx, unsigned stmt_idx) const {
+ return m_temporaries.at(tmp_idx).valid_at( m_block_offsets[bb_idx] + stmt_idx );
+ }
+};
+
+namespace visit {
+ enum class ValUsage {
+ Move,
+ Read,
+ Write,
+ Borrow,
+ };
+
+ extern bool visit_mir_lvalue(const ::MIR::LValue& lv, ValUsage u, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb);
+ extern bool visit_mir_lvalue(const ::MIR::Param& p, ValUsage u, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb);
+ extern bool visit_mir_lvalues(const ::MIR::RValue& rval, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb);
+ extern bool visit_mir_lvalues(const ::MIR::Statement& stmt, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb);
+ extern bool visit_mir_lvalues(const ::MIR::Terminator& term, ::std::function<bool(const ::MIR::LValue& , ValUsage)> cb);
+} // namespace visit
+
} // namespace MIR
+
+extern ::MIR::ValueLifetimes MIR_Helper_GetLifetimes(::MIR::TypeResolve& state, const ::MIR::Function& fcn, bool dump_debug);
diff --git a/src/mir/main_bindings.hpp b/src/mir/main_bindings.hpp
index dc9a61a9..0d6074cb 100644
--- a/src/mir/main_bindings.hpp
+++ b/src/mir/main_bindings.hpp
@@ -15,6 +15,7 @@ class Crate;
extern void HIR_GenerateMIR(::HIR::Crate& crate);
extern void MIR_Dump(::std::ostream& sink, const ::HIR::Crate& crate);
extern void MIR_CheckCrate(/*const*/ ::HIR::Crate& crate);
+extern void MIR_CheckCrate_Full(/*const*/ ::HIR::Crate& crate);
extern void MIR_CleanupCrate(::HIR::Crate& crate);
extern void MIR_OptimiseCrate(::HIR::Crate& crate);
diff --git a/src/mir/mir.cpp b/src/mir/mir.cpp
index 6ae5808b..9edc925b 100644
--- a/src/mir/mir.cpp
+++ b/src/mir/mir.cpp
@@ -24,22 +24,22 @@ namespace MIR {
os << (e.v ? "true" : "false");
),
(Bytes,
- os << "[";
+ os << "b\"";
os << ::std::hex;
for(auto v : e)
- os << static_cast<unsigned int>(v) << " ";
- os << ::std::dec;
- os << "]";
- ),
- (StaticString,
- os << "\"";
- for(auto v : e) {
+ {
if( ' ' <= v && v < 0x7F && v != '"' && v != '\\' )
os << v;
+ else if( v < 16 )
+ os << "\\x0" << (unsigned int)v;
else
- os << "\\u{" << FMT(::std::hex << (unsigned int)v) << "}";
+ os << "\\x" << ((unsigned int)v & 0xFF);
}
os << "\"";
+ os << ::std::dec;
+ ),
+ (StaticString,
+ os << "\"" << FmtEscaped(e) << "\"";
),
(Const,
os << e.p;
@@ -50,34 +50,40 @@ namespace MIR {
)
return os;
}
- bool Constant::operator==(const Constant& b) const
+ ::Ordering Constant::ord(const Constant& b) const
{
if( this->tag() != b.tag() )
- return false;
+ return ::ord( static_cast<unsigned int>(this->tag()), b.tag() );
TU_MATCHA( (*this,b), (ae,be),
(Int,
- return ae.v == be.v && ae.t == be.t;
+ if( ae.v != be.v )
+ return ::ord(ae.v, be.v);
+ return ::ord((unsigned)ae.t, (unsigned)be.t);
),
(Uint,
- return ae.v == be.v && ae.t == be.t;
+ if( ae.v != be.v )
+ return ::ord(ae.v, be.v);
+ return ::ord((unsigned)ae.t, (unsigned)be.t);
),
(Float,
- return ae.v == be.v && ae.t == be.t;
+ if( ae.v != be.v )
+ return ::ord(ae.v, be.v);
+ return ::ord((unsigned)ae.t, (unsigned)be.t);
),
(Bool,
- return ae.v == be.v;
+ return ::ord(ae.v, be.v);
),
(Bytes,
- return ae == be;
+ return ::ord(ae, be);
),
(StaticString,
- return ae == be;
+ return ::ord(ae, be);
),
(Const,
- return ae.p == be.p;
+ return ::ord(ae.p, be.p);
),
(ItemAddr,
- return ae == be;
+ return ::ord(ae, be);
)
)
throw "";
@@ -444,6 +450,51 @@ namespace MIR {
return os;
}
+ ::std::ostream& operator<<(::std::ostream& os, const Statement& x)
+ {
+ TU_MATCHA( (x), (e),
+ (Assign,
+ os << e.dst << " = " << e.src;
+ ),
+ (Asm,
+ os << "(";
+ for(const auto& spec : e.outputs)
+ os << "\"" << spec.first << "\" : " << spec.second << ", ";
+ os << ") = asm!(\"\", input=( ";
+ for(const auto& spec : e.inputs)
+ os << "\"" << spec.first << "\" : " << spec.second << ", ";
+ os << "), clobbers=[" << e.clobbers << "], flags=[" << e.flags << "])";
+ ),
+ (SetDropFlag,
+ os << "df$" << e.idx << " = ";
+ if( e.other == ~0u )
+ {
+ os << e.new_val;
+ }
+ else
+ {
+ os << (e.new_val ? "!" : "") << "df$" << e.other;
+ }
+ ),
+ (Drop,
+ os << "drop(" << e.slot;
+ if(e.kind == ::MIR::eDropKind::SHALLOW)
+ os << " SHALLOW";
+ if(e.flag_idx != ~0u)
+ os << "IF df$" << e.flag_idx;
+ os << ")";
+ ),
+ (ScopeEnd,
+ os << "ScopeEnd(";
+ for(auto idx : e.vars)
+ os << "var$" << idx << ",";
+ for(auto idx : e.tmps)
+ os << "tmp$" << idx << ",";
+ os << ")";
+ )
+ )
+ return os;
+ }
}
::MIR::LValue MIR::LValue::clone() const
diff --git a/src/mir/mir.hpp b/src/mir/mir.hpp
index 75bb29a8..c22f8d5d 100644
--- a/src/mir/mir.hpp
+++ b/src/mir/mir.hpp
@@ -112,10 +112,13 @@ TAGGED_UNION_EX(Constant, (), Int, (
(ItemAddr, ::HIR::Path) // address of a value
), (), (), (
friend ::std::ostream& operator<<(::std::ostream& os, const Constant& v);
- bool operator==(const Constant& b) const;
- inline bool operator!=(const Constant& b) const {
- return !(*this == b);
- }
+ ::Ordering ord(const Constant& b) const;
+ inline bool operator==(const Constant& b) const { return ord(b) == ::OrdEqual; }
+ inline bool operator!=(const Constant& b) const { return ord(b) != ::OrdEqual; }
+ inline bool operator<(const Constant& b) const { return ord(b) == ::OrdLess; }
+ inline bool operator<=(const Constant& b) const { return ord(b) != ::OrdGreater; }
+ inline bool operator>(const Constant& b) const { return ord(b) == ::OrdGreater; }
+ inline bool operator>=(const Constant& b) const { return ord(b) != ::OrdLess; }
Constant clone() const;
)
);
@@ -269,8 +272,13 @@ TAGGED_UNION(Statement, Assign,
eDropKind kind; // NOTE: For the `box` primitive
LValue slot;
unsigned int flag_idx; // Valid if != ~0u
+ }),
+ (ScopeEnd, struct {
+ ::std::vector<unsigned> vars;
+ ::std::vector<unsigned> tmps;
})
);
+extern ::std::ostream& operator<<(::std::ostream& os, const Statement& x);
struct BasicBlock
{
@@ -282,6 +290,7 @@ struct BasicBlock
class Function
{
public:
+ // TODO: Unify Variables, Temporaries, and Arguments
::std::vector< ::HIR::TypeRef> named_variables;
::std::vector< ::HIR::TypeRef> temporaries;
::std::vector<bool> drop_flags;
diff --git a/src/mir/mir_builder.cpp b/src/mir/mir_builder.cpp
index 296dfa91..eb50e9f7 100644
--- a/src/mir/mir_builder.cpp
+++ b/src/mir/mir_builder.cpp
@@ -32,8 +32,14 @@ MirBuilder::MirBuilder(const Span& sp, const StaticTraitResolve& resolve, const
m_scopes.push_back( ScopeDef { sp, ScopeType::make_Temporaries({}) } );
m_scope_stack.push_back( 1 );
+
+ m_if_cond_lval = this->new_temporary(::HIR::CoreType::Bool);
+
+ m_arg_states.reserve( args.size() );
+ for(size_t i = 0; i < args.size(); i ++ )
+ m_arg_states.push_back( VarState::make_Valid({}) );
m_variable_states.reserve( output.named_variables.size() );
- for(unsigned int i = 0; i < output.named_variables.size(); i ++ )
+ for(size_t i = 0; i < output.named_variables.size(); i ++ )
m_variable_states.push_back( VarState::make_Invalid(InvalidType::Uninit) );
}
MirBuilder::~MirBuilder()
@@ -116,6 +122,22 @@ void MirBuilder::define_variable(unsigned int idx)
top_scope = &m_scopes.at(idx);
break ;
}
+ else if( m_scopes.at(idx).data.is_Loop() )
+ {
+ // Newly created temporary within a loop, if there is a saved
+ // state this temp needs a drop flag.
+ // TODO: ^
+ }
+ else if( m_scopes.at(idx).data.is_Split() )
+ {
+ // Newly created temporary within a split, if there is a saved
+ // state this temp needs a drop flag.
+ // TODO: ^
+ }
+ else
+ {
+ // Nothing.
+ }
}
assert( top_scope );
auto& tmp_scope = top_scope->data.as_Temporaries();
@@ -190,7 +212,7 @@ void MirBuilder::define_variable(unsigned int idx)
{
auto temp = new_temporary(ty);
push_stmt_assign( sp, ::MIR::LValue(temp.clone()), mv$(rv) );
- return ::MIR::Param( mv$(temp) );
+ return temp;
}
}
void MirBuilder::set_result(const Span& sp, ::MIR::RValue val)
@@ -263,7 +285,7 @@ void MirBuilder::push_stmt_assign(const Span& sp, ::MIR::LValue dst, ::MIR::RVal
// Doesn't move
),
(MakeDst,
- // Doesn't move ptr_val
+ moved_param(e.ptr_val);
moved_param(e.meta_val);
),
(Tuple,
@@ -285,7 +307,7 @@ void MirBuilder::push_stmt_assign(const Span& sp, ::MIR::LValue dst, ::MIR::RVal
// Drop target if populated
mark_value_assigned(sp, dst);
- m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Assign({ mv$(dst), mv$(val) }) );
+ this->push_stmt( sp, ::MIR::Statement::make_Assign({ mv$(dst), mv$(val) }) );
}
void MirBuilder::push_stmt_drop(const Span& sp, ::MIR::LValue val, unsigned int flag/*=~0u*/)
{
@@ -297,11 +319,13 @@ void MirBuilder::push_stmt_drop(const Span& sp, ::MIR::LValue val, unsigned int
return ;
}
- DEBUG("DROP " << val);
-
- auto stmt = ::MIR::Statement::make_Drop({ ::MIR::eDropKind::DEEP, mv$(val), flag });
+ this->push_stmt(sp, ::MIR::Statement::make_Drop({ ::MIR::eDropKind::DEEP, mv$(val), flag }));
- m_output.blocks.at(m_current_block).statements.push_back( mv$(stmt) );
+ if( flag != ~0u )
+ {
+ // Reset flag value back to default.
+ push_stmt_set_dropflag_val(sp, flag, m_output.drop_flags.at(flag));
+ }
}
void MirBuilder::push_stmt_drop_shallow(const Span& sp, ::MIR::LValue val, unsigned int flag/*=~0u*/)
{
@@ -310,8 +334,13 @@ void MirBuilder::push_stmt_drop_shallow(const Span& sp, ::MIR::LValue val, unsig
// TODO: Ensure that the type is a Box?
- DEBUG("DROP shallow " << val);
- m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Drop({ ::MIR::eDropKind::SHALLOW, mv$(val), flag }) );
+ this->push_stmt(sp, ::MIR::Statement::make_Drop({ ::MIR::eDropKind::SHALLOW, mv$(val), flag }));
+
+ if( flag != ~0u )
+ {
+ // Reset flag value back to default.
+ push_stmt_set_dropflag_val(sp, flag, m_output.drop_flags.at(flag));
+ }
}
void MirBuilder::push_stmt_asm(const Span& sp, ::MIR::Statement::Data_Asm data)
{
@@ -322,17 +351,25 @@ void MirBuilder::push_stmt_asm(const Span& sp, ::MIR::Statement::Data_Asm data)
mark_value_assigned(sp, v.second);
// 2. Push
- m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Asm( mv$(data) ) );
+ this->push_stmt(sp, ::MIR::Statement::make_Asm( mv$(data) ));
}
void MirBuilder::push_stmt_set_dropflag_val(const Span& sp, unsigned int idx, bool value)
{
- ASSERT_BUG(sp, m_block_active, "Pushing statement with no active block");
- m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_SetDropFlag({ idx, value }) );
+ this->push_stmt(sp, ::MIR::Statement::make_SetDropFlag({ idx, value }));
}
void MirBuilder::push_stmt_set_dropflag_other(const Span& sp, unsigned int idx, unsigned int other)
{
+ this->push_stmt(sp, ::MIR::Statement::make_SetDropFlag({ idx, false, other }));
+}
+void MirBuilder::push_stmt_set_dropflag_default(const Span& sp, unsigned int idx)
+{
+ this->push_stmt(sp, ::MIR::Statement::make_SetDropFlag({ idx, this->get_drop_flag_default(sp, idx) }));
+}
+void MirBuilder::push_stmt(const Span& sp, ::MIR::Statement stmt)
+{
ASSERT_BUG(sp, m_block_active, "Pushing statement with no active block");
- m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_SetDropFlag({ idx, false, other }) );
+ DEBUG(stmt);
+ m_output.blocks.at(m_current_block).statements.push_back( mv$(stmt) );
}
void MirBuilder::mark_value_assigned(const Span& sp, const ::MIR::LValue& dst)
@@ -374,32 +411,56 @@ void MirBuilder::mark_value_assigned(const Span& sp, const ::MIR::LValue& dst)
}
}
-void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val, const ScopeHandle& scope)
+void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val, const ScopeHandle& scope, bool to_above/*=false*/)
{
TRACE_FUNCTION_F(val);
TU_MATCH_DEF(::MIR::LValue, (val), (e),
(
+ // No raising of these source values?
+ return ;
),
// TODO: This may not be correct, because it can change the drop points and ordering
// HACK: Working around cases where values are dropped while the result is not yet used.
+ (Index,
+ raise_variables(sp, *e.val, scope, to_above);
+ raise_variables(sp, *e.idx, scope, to_above);
+ return ;
+ ),
(Deref,
- raise_variables(sp, *e.val, scope);
+ raise_variables(sp, *e.val, scope, to_above);
+ return ;
),
(Field,
- raise_variables(sp, *e.val, scope);
+ raise_variables(sp, *e.val, scope, to_above);
+ return ;
),
(Downcast,
- raise_variables(sp, *e.val, scope);
+ raise_variables(sp, *e.val, scope, to_above);
+ return ;
),
// Actual value types
(Variable,
- auto idx = e;
- auto scope_it = m_scope_stack.rbegin();
- while( scope_it != m_scope_stack.rend() )
+ ),
+ (Temporary,
+ )
+ )
+ ASSERT_BUG(sp, val.is_Variable() || val.is_Temporary(), "Hit value raising code with non-variable value - " << val);
+
+ // Find controlling scope
+ auto scope_it = m_scope_stack.rbegin();
+ while( scope_it != m_scope_stack.rend() )
+ {
+ auto& scope_def = m_scopes.at(*scope_it);
+
+ if( *scope_it == scope.idx && !to_above )
{
- auto& scope_def = m_scopes.at(*scope_it);
+ DEBUG(val << " defined in or above target (scope " << scope << ")");
+ }
- TU_IFLET( ScopeType, scope_def.data, Variables, e,
+ TU_IFLET( ScopeType, scope_def.data, Variables, e,
+ if( const auto* ve = val.opt_Variable() )
+ {
+ auto idx = *ve;
auto tmp_it = ::std::find( e.vars.begin(), e.vars.end(), idx );
if( tmp_it != e.vars.end() )
{
@@ -407,41 +468,12 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val, const
DEBUG("Raise variable " << idx << " from " << *scope_it);
break ;
}
- )
- // If the variable was defined above the desired scope (i.e. this didn't find it), return
- if( *scope_it == scope.idx )
- return ;
- ++scope_it;
- }
- if( scope_it == m_scope_stack.rend() )
- {
- // Temporary wasn't defined in a visible scope?
- return ;
- }
- ++scope_it;
-
- while( scope_it != m_scope_stack.rend() )
- {
- auto& scope_def = m_scopes.at(*scope_it);
-
- TU_IFLET( ScopeType, scope_def.data, Variables, e,
- e.vars.push_back( idx );
- DEBUG("- to " << *scope_it);
- return ;
- )
- ++scope_it;
- }
-
- DEBUG("- top");
- ),
- (Temporary,
- auto idx = e.idx;
- auto scope_it = m_scope_stack.rbegin();
- while( scope_it != m_scope_stack.rend() )
- {
- auto& scope_def = m_scopes.at(*scope_it);
-
- TU_IFLET( ScopeType, scope_def.data, Temporaries, e,
+ }
+ )
+ else TU_IFLET( ScopeType, scope_def.data, Temporaries, e,
+ if( const auto* ve = val.opt_Temporary() )
+ {
+ auto idx = ve->idx;
auto tmp_it = ::std::find( e.temporaries.begin(), e.temporaries.end(), idx );
if( tmp_it != e.temporaries.end() )
{
@@ -449,45 +481,169 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val, const
DEBUG("Raise temporary " << idx << " from " << *scope_it);
break ;
}
- )
-
- // If the temporary was defined above the desired scope (i.e. this didn't find it), return
- if( *scope_it == scope.idx )
- return ;
- ++scope_it;
+ }
+ )
+ else
+ {
+ // TODO: Does this need to handle this value being set in the
+ // split scopes?
}
- if( scope_it == m_scope_stack.rend() )
+ // If the variable was defined above the desired scope (i.e. this didn't find it), return
+ if( *scope_it == scope.idx )
{
- // Temporary wasn't defined in a visible scope?
+ DEBUG("Value " << val << " is defined above the target (scope " << scope << ")");
return ;
}
++scope_it;
+ }
+ if( scope_it == m_scope_stack.rend() )
+ {
+ // Temporary wasn't defined in a visible scope?
+ BUG(sp, val << " wasn't defined in a visible scope");
+ return ;
+ }
- while( scope_it != m_scope_stack.rend() )
- {
- auto& scope_def = m_scopes.at(*scope_it);
+ // If the definition scope was the target scope
+ bool target_seen = false;
+ if( *scope_it == scope.idx )
+ {
+ if( to_above ) {
+ // Want to shift to any above (but not including) it
+ ++ scope_it;
+ }
+ else {
+ // Want to shift to it or above.
+ }
- TU_IFLET( ScopeType, scope_def.data, Temporaries, e,
- e.temporaries.push_back( idx );
- DEBUG("- to " << *scope_it);
- return ;
- )
- ++scope_it;
+ target_seen = true;
+ }
+ else
+ {
+ // Don't bother searching the original definition scope
+ ++scope_it;
+ }
+
+ // Iterate stack until:
+ // - The target scope is seen
+ // - AND a scope was found for it
+ for( ; scope_it != m_scope_stack.rend(); ++ scope_it )
+ {
+ auto& scope_def = m_scopes.at(*scope_it);
+ DEBUG("> Cross " << *scope_it << " - " << scope_def.data.tag_str());
+
+ if( *scope_it == scope.idx )
+ {
+ target_seen = true;
}
- DEBUG("- top");
+ TU_IFLET( ScopeType, scope_def.data, Variables, e,
+ if( target_seen )
+ {
+ if( const auto* ve = val.opt_Variable() )
+ {
+ e.vars.push_back( *ve );
+ DEBUG("- to " << *scope_it);
+ return ;
+ }
+ }
)
- )
+ else TU_IFLET( ScopeType, scope_def.data, Temporaries, e,
+ if( target_seen )
+ {
+ if( const auto* ve = val.opt_Temporary() )
+ {
+ e.temporaries.push_back( ve->idx );
+ DEBUG("- to " << *scope_it);
+ return ;
+ }
+ }
+ )
+ else if( auto* sd_loop = scope_def.data.opt_Loop() )
+ {
+ // If there is an exit state present, ensure that this variable is
+ // present in that state (as invalid, as it can't have been valid
+ // externally)
+ if( sd_loop->exit_state_valid )
+ {
+ DEBUG("Adding " << val << " as unset to loop exit state");
+ if( const auto* ve = val.opt_Variable() )
+ {
+ auto v = sd_loop->exit_state.var_states.insert( ::std::make_pair(*ve, VarState(InvalidType::Uninit)) );
+ ASSERT_BUG(sp, v.second, "Raising " << val << " which already had a state entry");
+ }
+ else if( const auto* ve = val.opt_Temporary() )
+ {
+ auto v = sd_loop->exit_state.tmp_states.insert( ::std::make_pair(ve->idx, VarState(InvalidType::Uninit)) );
+ ASSERT_BUG(sp, v.second, "Raising " << val << " which already had a state entry");
+ }
+ else {
+ BUG(sp, "Impossible raise value");
+ }
+ }
+ else
+ {
+ DEBUG("Crossing loop with no existing exit state");
+ }
+ }
+ else if( auto* sd_split = scope_def.data.opt_Split() )
+ {
+ // If the split has already registered an exit state, ensure that
+ // this variable is present in it. (as invalid)
+ if( sd_split->end_state_valid )
+ {
+ DEBUG("Adding " << val << " as unset to loop exit state");
+ if( const auto* ve = val.opt_Variable() )
+ {
+ auto v = sd_split->end_state.var_states.insert( ::std::make_pair(*ve, VarState(InvalidType::Uninit)) );
+ ASSERT_BUG(sp, v.second, "Raising " << val << " which already had a state entry");
+ }
+ else if( const auto* ve = val.opt_Temporary() )
+ {
+ auto v = sd_split->end_state.tmp_states.insert( ::std::make_pair(ve->idx, VarState(InvalidType::Uninit)) );
+ ASSERT_BUG(sp, v.second, "Raising " << val << " which already had a state entry");
+ }
+ else {
+ BUG(sp, "Impossible raise value");
+ }
+ }
+ else
+ {
+ DEBUG("Crossing split with no existing end state");
+ }
+
+ // TODO: This should update the outer state to unset.
+ auto& arm = sd_split->arms.back();
+ if( const auto* ve = val.opt_Variable() )
+ {
+ arm.var_states.insert(::std::make_pair( *ve, get_variable_state(sp, *ve).clone() ));
+ m_variable_states.at(*ve) = VarState(InvalidType::Uninit);
+ }
+ else if( const auto* ve = val.opt_Temporary() )
+ {
+ arm.tmp_states.insert(::std::make_pair( ve->idx, get_temp_state(sp, ve->idx).clone() ));
+ m_temporary_states.at(ve->idx) = VarState(InvalidType::Uninit);
+ }
+ else
+ {
+ BUG(sp, "Impossible raise value");
+ }
+ }
+ else
+ {
+ BUG(sp, "Crossing unknown scope type - " << scope_def.data.tag_str());
+ }
+ }
+ BUG(sp, "Couldn't find a scope to raise " << val << " into");
}
-void MirBuilder::raise_variables(const Span& sp, const ::MIR::RValue& rval, const ScopeHandle& scope)
+void MirBuilder::raise_variables(const Span& sp, const ::MIR::RValue& rval, const ScopeHandle& scope, bool to_above/*=false*/)
{
auto raise_vars = [&](const ::MIR::Param& p) {
if( const auto* e = p.opt_LValue() )
- this->raise_variables(sp, *e, scope);
+ this->raise_variables(sp, *e, scope, to_above);
};
TU_MATCHA( (rval), (e),
(Use,
- this->raise_variables(sp, e, scope);
+ this->raise_variables(sp, e, scope, to_above);
),
(Constant,
),
@@ -496,23 +652,23 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::RValue& rval, cons
),
(Borrow,
// TODO: Wait, is this valid?
- this->raise_variables(sp, e.val, scope);
+ this->raise_variables(sp, e.val, scope, to_above);
),
(Cast,
- this->raise_variables(sp, e.val, scope);
+ this->raise_variables(sp, e.val, scope, to_above);
),
(BinOp,
raise_vars(e.val_l);
raise_vars(e.val_r);
),
(UniOp,
- this->raise_variables(sp, e.val, scope);
+ this->raise_variables(sp, e.val, scope, to_above);
),
(DstMeta,
- this->raise_variables(sp, e.val, scope);
+ this->raise_variables(sp, e.val, scope, to_above);
),
(DstPtr,
- this->raise_variables(sp, e.val, scope);
+ this->raise_variables(sp, e.val, scope, to_above);
),
(MakeDst,
raise_vars(e.ptr_val);
@@ -656,6 +812,21 @@ void MirBuilder::terminate_scope(const Span& sp, ScopeHandle scope, bool emit_cl
{
// 2. Emit drops for all non-moved variables (share with below)
drop_scope_values(scope_def);
+
+ // Emit ScopeEnd for all controlled values
+ ::MIR::Statement::Data_ScopeEnd se;
+ if(const auto* e = scope_def.data.opt_Variables() ) {
+ se.vars = e->vars;
+ }
+ else if(const auto* e = scope_def.data.opt_Temporaries()) {
+ se.tmps = e->temporaries;
+ }
+ else {
+ }
+ // Only push the ScopeEnd if there were variables to end
+ if( !se.vars.empty() || !se.tmps.empty() ) {
+ this->push_stmt(sp, ::MIR::Statement( mv$(se) ));
+ }
}
// 3. Pop scope (last because `drop_scope_values` uses the stack)
@@ -663,6 +834,107 @@ void MirBuilder::terminate_scope(const Span& sp, ScopeHandle scope, bool emit_cl
complete_scope(scope_def);
}
+void MirBuilder::raise_all(const Span& sp, ScopeHandle source, const ScopeHandle& target)
+{
+ TRACE_FUNCTION_F("scope " << source.idx << " => " << target.idx);
+
+ // 1. Check that this is the current scope (at the top of the stack)
+ if( m_scope_stack.empty() || m_scope_stack.back() != source.idx )
+ {
+ DEBUG("- m_scope_stack = [" << m_scope_stack << "]");
+ auto it = ::std::find( m_scope_stack.begin(), m_scope_stack.end(), source.idx );
+ if( it == m_scope_stack.end() )
+ BUG(sp, "Terminating scope not on the stack - scope " << source.idx);
+ BUG(sp, "Terminating scope " << source.idx << " when not at top of stack, " << (m_scope_stack.end() - it - 1) << " scopes in the way");
+ }
+ auto& src_scope_def = m_scopes.at(source.idx);
+
+#if 1
+ ASSERT_BUG(sp, src_scope_def.data.is_Temporaries(), "Rasising scopes can only be done on temporaries (source)");
+ auto& src_list = src_scope_def.data.as_Temporaries().temporaries;
+ for(auto idx : src_list)
+ {
+ DEBUG("> Raising " << ::MIR::LValue::make_Temporary({ idx }));
+ }
+
+ // Seek up stack until the target scope is seen
+ auto it = m_scope_stack.rbegin() + 1;
+ for( ; it != m_scope_stack.rend() && *it != target.idx; ++it)
+ {
+ auto& scope_def = m_scopes.at(*it);
+
+ if(auto* sd_loop = scope_def.data.opt_Loop())
+ {
+ if(sd_loop->exit_state_valid)
+ {
+ DEBUG("Crossing loop with existing end state");
+ // Insert these values as Invalid, both in the existing exit state, and in the changed list
+ for(auto idx : src_list)
+ {
+ auto v = sd_loop->exit_state.tmp_states.insert(::std::make_pair( idx, VarState(InvalidType::Uninit) ));
+ ASSERT_BUG(sp, v.second, "");
+ }
+ }
+ else
+ {
+ DEBUG("Crossing loop with no end state");
+ }
+
+ for(auto idx : src_list)
+ {
+ auto v2 = sd_loop->changed_tmps.insert(::std::make_pair( idx, VarState(InvalidType::Uninit) ));
+ ASSERT_BUG(sp, v2.second, "");
+ }
+ }
+ else if(auto* sd_split = scope_def.data.opt_Split())
+ {
+ if(sd_split->end_state_valid)
+ {
+ DEBUG("Crossing split with existing end state");
+ // Insert these indexes as Invalid
+ for(auto idx : src_list)
+ {
+ auto v = sd_split->end_state.tmp_states.insert(::std::make_pair( idx, VarState(InvalidType::Uninit) ));
+ ASSERT_BUG(sp, v.second, "");
+ }
+ }
+ else
+ {
+ DEBUG("Crossing split with no end state");
+ }
+
+ // TODO: Insert current state in the current arm
+ assert(!sd_split->arms.empty());
+ auto& arm = sd_split->arms.back();
+ for(auto idx : src_list)
+ {
+ arm.tmp_states.insert(::std::make_pair( idx, mv$(m_temporary_states.at(idx)) ));
+ m_temporary_states.at(idx) = VarState(InvalidType::Uninit);
+ }
+ }
+ }
+ if(it == m_scope_stack.rend())
+ {
+ BUG(sp, "Moving values to a scope not on the stack - scope " << target.idx);
+ }
+ auto& tgt_scope_def = m_scopes.at(target.idx);
+ ASSERT_BUG(sp, tgt_scope_def.data.is_Temporaries(), "Rasising scopes can only be done on temporaries (target)");
+
+ // Move all defined variables from one to the other
+ auto& tgt_list = tgt_scope_def.data.as_Temporaries().temporaries;
+ tgt_list.insert( tgt_list.end(), src_list.begin(), src_list.end() );
+#else
+ auto list = src_scope_def.data.as_Temporaries().temporaries;
+ for(auto idx : list)
+ {
+ this->raise_variables(sp, ::MIR::LValue::make_Temporary({ idx }), target);
+ }
+#endif
+
+ // Scope completed
+ m_scope_stack.pop_back();
+ src_scope_def.complete = true;
+}
void MirBuilder::terminate_scope_early(const Span& sp, const ScopeHandle& scope, bool loop_exit/*=false*/)
{
@@ -717,7 +989,7 @@ namespace
{
static void merge_state(const Span& sp, MirBuilder& builder, const ::MIR::LValue& lv, VarState& old_state, const VarState& new_state)
{
- DEBUG(lv << " : " << old_state << " <= " << new_state);
+ TRACE_FUNCTION_FR(lv << " : " << old_state << " <= " << new_state, lv << " : " << old_state);
switch(old_state.tag())
{
case VarState::TAGDEAD: throw "";
@@ -735,61 +1007,104 @@ namespace
case VarState::TAG_Optional: {
// Was invalid, now optional.
auto flag_idx = new_state.as_Optional();
- if( builder.get_drop_flag_default(sp, flag_idx) != false ) {
+ if( true || builder.get_drop_flag_default(sp, flag_idx) != false ) {
#if 1
auto new_flag = builder.new_drop_flag(false);
builder.push_stmt_set_dropflag_other(sp, new_flag, flag_idx);
+ builder.push_stmt_set_dropflag_default(sp, flag_idx);
old_state = VarState::make_Optional( new_flag );
#else
// TODO: Rewrite history. I.e. visit all previous branches and set this drop flag to `false` in all of them
TODO(sp, "Drop flag default not false when going Invalid->Optional");
#endif
}
- old_state = VarState::make_Optional( flag_idx );
+ else {
+ old_state = VarState::make_Optional( flag_idx );
+ }
return ;
}
- case VarState::TAG_Partial: {
- const auto& nse = new_state.as_Partial();
+ case VarState::TAG_MovedOut: {
+ const auto& nse = new_state.as_MovedOut();
+
+ // Create a new state that is internally valid and uses the same drop flag
+ old_state = VarState::make_MovedOut({ box$(old_state.clone()), nse.outer_flag });
+ auto& ose = old_state.as_MovedOut();
+ if( ose.outer_flag != ~0u )
+ {
+ // If the flag's default isn't false, then create a new flag that does have such a default
+ // - Other arm (old_state) uses default, this arm (new_state) can be manipulated
+ if( builder.get_drop_flag_default(sp, ose.outer_flag) != false )
+ {
+ auto new_flag = builder.new_drop_flag(false);
+ builder.push_stmt_set_dropflag_other(sp, new_flag, nse.outer_flag);
+ builder.push_stmt_set_dropflag_default(sp, nse.outer_flag);
+ ose.outer_flag = new_flag;
+ }
+ }
+ else
+ {
+ // In the old arm, the container isn't valid. Create a drop flag with a default of false and set it to true
+ ose.outer_flag = builder.new_drop_flag(false);
+ builder.push_stmt_set_dropflag_val(sp, ose.outer_flag, true);
+ }
+
bool is_box = false;
- builder.with_val_type(sp, lv, [&](const auto& ty){ is_box = builder.is_type_owned_box(ty); });
- if( is_box ) {
- ASSERT_BUG(sp, nse.inner_states.size() == 1, "");
+ builder.with_val_type(sp, lv, [&](const auto& ty){
+ is_box = builder.is_type_owned_box(ty);
+ });
+ if( is_box )
+ {
+ merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), *ose.inner_state, *nse.inner_state);
}
- if( nse.outer_flag != ~0u ) {
- // Set the outer flag to `true` if its default isn't true
- if( builder.get_drop_flag_default(sp, nse.outer_flag) != false ) {
- builder.push_stmt_set_dropflag_val(sp, nse.outer_flag, false);
- }
+ else
+ {
+ BUG(sp, "Handle MovedOut on non-Box");
+ }
+ return ;
}
+ case VarState::TAG_Partial: {
+ const auto& nse = new_state.as_Partial();
+ bool is_enum = false;
+ builder.with_val_type(sp, lv, [&](const auto& ty){
+ is_enum = ty.m_data.is_Path() && ty.m_data.as_Path().binding.is_Enum();
+ });
- auto out = new_state.clone();
- auto& ose = out.as_Partial();
- if( ose.outer_flag == ~0u )
+ // Create a partial filled with Invalid
{
- ose.outer_flag = builder.new_drop_flag_and_set(sp, true); // Only in this arm is the container valid
+ ::std::vector<VarState> inner; inner.reserve( nse.inner_states.size() );
+ for(size_t i = 0; i < nse.inner_states.size(); i++)
+ inner.push_back( old_state.clone() );
+ old_state = VarState::make_Partial({ mv$(inner) });
}
- if( is_box ) {
- merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), ose.inner_states[0], old_state);
+ auto& ose = old_state.as_Partial();
+ if( is_enum ) {
+ for(size_t i = 0; i < ose.inner_states.size(); i ++)
+ {
+ merge_state(sp, builder, ::MIR::LValue::make_Downcast({ box$(lv.clone()), static_cast<unsigned int>(i) }), ose.inner_states[i], nse.inner_states[i]);
+ }
}
else {
for(unsigned int i = 0; i < ose.inner_states.size(); i ++ )
{
- merge_state(sp, builder, ::MIR::LValue::make_Field({ box$(lv.clone()), i }), ose.inner_states[i], old_state);
+ merge_state(sp, builder, ::MIR::LValue::make_Field({ box$(lv.clone()), i }), ose.inner_states[i], nse.inner_states[i]);
}
}
- old_state = mv$(out);
} return;
}
break;
+ // Valid <= ...
case VarState::TAG_Valid:
switch( new_state.tag() )
{
case VarState::TAGDEAD: throw "";
+ // Valid <= Invalid
case VarState::TAG_Invalid:
old_state = VarState::make_Optional( builder.new_drop_flag_and_set(sp, false) );
return ;
+ // Valid <= Valid
case VarState::TAG_Valid:
return ;
+ // Valid <= Optional
case VarState::TAG_Optional: {
auto flag_idx = new_state.as_Optional();
// Was valid, now optional.
@@ -798,6 +1113,7 @@ namespace
#if 1
auto new_flag = builder.new_drop_flag(true);
builder.push_stmt_set_dropflag_other(sp, new_flag, flag_idx);
+ builder.push_stmt_set_dropflag_default(sp, flag_idx);
old_state = VarState::make_Optional( new_flag );
#else
// OR: Push an assign of this flag to every other completed arm
@@ -816,39 +1132,79 @@ namespace
}
return ;
}
- case VarState::TAG_Partial: {
- const auto& nse = new_state.as_Partial();
+ // Valid <= MovedOut
+ case VarState::TAG_MovedOut: {
+ const auto& nse = new_state.as_MovedOut();
+
+ // Create a new state that is internally valid and uses the same drop flag
+ old_state = VarState::make_MovedOut({ box$(VarState::make_Valid({})), nse.outer_flag });
+ auto& ose = old_state.as_MovedOut();
+ if( ose.outer_flag != ~0u )
+ {
+ // If the flag's default isn't true, then create a new flag that does have such a default
+ // - Other arm (old_state) uses default, this arm (new_state) can be manipulated
+ if( builder.get_drop_flag_default(sp, ose.outer_flag) != true )
+ {
+ auto new_flag = builder.new_drop_flag(true);
+ builder.push_stmt_set_dropflag_other(sp, new_flag, nse.outer_flag);
+ builder.push_stmt_set_dropflag_default(sp, nse.outer_flag);
+ ose.outer_flag = new_flag;
+ }
+ }
+ else
+ {
+ // In both arms, the container is valid. No need for a drop flag
+ }
+
bool is_box = false;
- builder.with_val_type(sp, lv, [&](const auto& ty){ is_box = builder.is_type_owned_box(ty); });
+ builder.with_val_type(sp, lv, [&](const auto& ty){
+ is_box = builder.is_type_owned_box(ty);
+ });
+
if( is_box ) {
- ASSERT_BUG(sp, nse.inner_states.size() == 1, "");
+ merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), *ose.inner_state, *nse.inner_state);
}
- if( nse.outer_flag != ~0u ) {
- // Set the outer flag to `true` if its default isn't true
- if( builder.get_drop_flag_default(sp, nse.outer_flag) != true ) {
- builder.push_stmt_set_dropflag_val(sp, nse.outer_flag, true);
- }
+ else {
+ BUG(sp, "MovedOut on non-Box");
+ }
+ return;
}
+ // Valid <= Partial
+ case VarState::TAG_Partial: {
+ const auto& nse = new_state.as_Partial();
+ bool is_enum = false;
+ builder.with_val_type(sp, lv, [&](const auto& ty){
+ is_enum = ty.m_data.is_Path() && ty.m_data.as_Path().binding.is_Enum();
+ });
- auto out = new_state.clone();
- auto& ose = out.as_Partial();
- if( ose.outer_flag == ~0u )
+ // Create a partial filled with Valid
{
- ose.outer_flag = builder.new_drop_flag(true); // In both arms, the container is valid
+ ::std::vector<VarState> inner; inner.reserve( nse.inner_states.size() );
+ for(size_t i = 0; i < nse.inner_states.size(); i++)
+ inner.push_back( VarState::make_Valid({}) );
+ old_state = VarState::make_Partial({ mv$(inner) });
}
- if( is_box ) {
- merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), ose.inner_states[0], old_state);
+ auto& ose = old_state.as_Partial();
+ if( is_enum ) {
+ auto ilv = ::MIR::LValue::make_Downcast({ box$(lv.clone()), 0 });
+ for(size_t i = 0; i < ose.inner_states.size(); i ++)
+ {
+ merge_state(sp, builder, ilv, ose.inner_states[i], nse.inner_states[i]);
+ ilv.as_Downcast().variant_index ++;
+ }
}
else {
+ auto ilv = ::MIR::LValue::make_Field({ box$(lv.clone()), 0 });
for(unsigned int i = 0; i < ose.inner_states.size(); i ++ )
{
- merge_state(sp, builder, ::MIR::LValue::make_Field({ box$(lv.clone()), i }), ose.inner_states[i], old_state);
+ merge_state(sp, builder, ilv, ose.inner_states[i], nse.inner_states[i]);
+ ilv.as_Field().field_index ++;
}
}
- old_state = mv$(out);
} return;
}
break;
+ // Optional <= ...
case VarState::TAG_Optional:
switch( new_state.tag() )
{
@@ -863,46 +1219,134 @@ namespace
if( old_state.as_Optional() != new_state.as_Optional() ) {
#if 1
builder.push_stmt_set_dropflag_other(sp, old_state.as_Optional(), new_state.as_Optional());
+ builder.push_stmt_set_dropflag_default(sp, new_state.as_Optional());
#else
// TODO: Rewrite history replacing one flag with another (if they have the same default)
#endif
}
return ;
- case VarState::TAG_Partial:
- TODO(sp, "Handle Optional->Partial in split scope");
+ case VarState::TAG_MovedOut:
+ TODO(sp, "Handle Optional->MovedOut in split scope");
+ case VarState::TAG_Partial: {
+ const auto& nse = new_state.as_Partial();
+ bool is_enum = false;
+ builder.with_val_type(sp, lv, [&](const auto& ty){
+ assert( !builder.is_type_owned_box(ty) );
+ is_enum = ty.m_data.is_Path() && ty.m_data.as_Path().binding.is_Enum();
+ });
+ // Create a Partial filled with copies of the Optional
+ {
+ ::std::vector<VarState> inner;
+ inner.reserve( nse.inner_states.size() );
+ for(size_t i = 0; i < nse.inner_states.size(); i ++)
+ inner.push_back(old_state.clone());
+ old_state = VarState::make_Partial({ mv$(inner) });
+ }
+ auto& ose = old_state.as_Partial();
+ // Propagate to inners
+ if( is_enum ) {
+ for(size_t i = 0; i < ose.inner_states.size(); i ++)
+ {
+ merge_state(sp, builder, ::MIR::LValue::make_Downcast({ box$(lv.clone()), static_cast<unsigned int>(i) }), ose.inner_states[i], nse.inner_states[i]);
+ }
+ }
+ else {
+ for(unsigned int i = 0; i < ose.inner_states.size(); i ++ )
+ {
+ merge_state(sp, builder, ::MIR::LValue::make_Field({ box$(lv.clone()), i }), ose.inner_states[i], nse.inner_states[i]);
+ }
+ }
+ return; }
}
break;
- case VarState::TAG_Partial: {
- auto& ose = old_state.as_Partial();
+ case VarState::TAG_MovedOut: {
+ auto& ose = old_state.as_MovedOut();
bool is_box = false;
- builder.with_val_type(sp, lv, [&](const auto& ty){ is_box = builder.is_type_owned_box(ty); });
- if( is_box ) {
- ASSERT_BUG(sp, ose.inner_states.size() == 1, "");
+ builder.with_val_type(sp, lv, [&](const auto& ty){
+ is_box = builder.is_type_owned_box(ty);
+ });
+ if( !is_box ) {
+ BUG(sp, "MovedOut on non-Box");
}
- // Need to tag for conditional shallow drop? Or just do that at the end of the split?
- // - End of the split means that the only optional state is outer drop.
switch( new_state.tag() )
{
case VarState::TAGDEAD: throw "";
case VarState::TAG_Invalid:
- if( ose.outer_flag != ~0u ) {
- // Set the outer flag to `false` if its default isn't false
- if( builder.get_drop_flag_default(sp, ose.outer_flag) != false ) {
+ case VarState::TAG_Valid: {
+ bool is_valid = new_state.is_Valid();
+ if( ose.outer_flag == ~0u )
+ {
+ // If not valid in new arm, then the outer state is conditional
+ if( !is_valid )
+ {
+ ose.outer_flag = builder.new_drop_flag(true);
builder.push_stmt_set_dropflag_val(sp, ose.outer_flag, false);
}
}
- if( 0 )
- // - Fall through
- case VarState::TAG_Valid:
- if( ose.outer_flag != ~0u ) {
- // Set the outer flag to `true` if its default isn't true
- if( builder.get_drop_flag_default(sp, ose.outer_flag) != true ) {
- builder.push_stmt_set_dropflag_val(sp, ose.outer_flag, true);
+ else
+ {
+ builder.push_stmt_set_dropflag_val(sp, ose.outer_flag, is_valid);
+ }
+
+ merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), *ose.inner_state, new_state);
+ return ; }
+ case VarState::TAG_Optional: {
+ const auto& nse = new_state.as_Optional();
+ if( ose.outer_flag == ~0u )
+ {
+ if( ! builder.get_drop_flag_default(sp, nse) )
+ {
+ // Default wasn't true, need to make a new flag that does have a default of true
+ auto new_flag = builder.new_drop_flag(true);
+ builder.push_stmt_set_dropflag_other(sp, new_flag, nse);
+ builder.push_stmt_set_dropflag_default(sp, nse);
+ ose.outer_flag = new_flag;
+ }
+ else
+ {
+ ose.outer_flag = nse;
}
}
-
- if( is_box ) {
- merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), ose.inner_states[0], new_state);
+ else
+ {
+ // In this arm, assign the outer state to this drop flag
+ builder.push_stmt_set_dropflag_other(sp, ose.outer_flag, nse);
+ builder.push_stmt_set_dropflag_default(sp, nse);
+ }
+ merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), *ose.inner_state, new_state);
+ return; }
+ case VarState::TAG_MovedOut: {
+ const auto& nse = new_state.as_MovedOut();
+ if( ose.outer_flag != nse.outer_flag )
+ {
+ TODO(sp, "Handle mismatched flags in MovedOut");
+ }
+ merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), *ose.inner_state, *nse.inner_state);
+ return; }
+ case VarState::TAG_Partial:
+ BUG(sp, "MovedOut->Partial not valid");
+ }
+ break; }
+ case VarState::TAG_Partial: {
+ auto& ose = old_state.as_Partial();
+ bool is_enum = false;
+ builder.with_val_type(sp, lv, [&](const auto& ty){
+ assert( !builder.is_type_owned_box(ty) );
+ is_enum = ty.m_data.is_Path() && ty.m_data.as_Path().binding.is_Enum();
+ });
+ // Need to tag for conditional shallow drop? Or just do that at the end of the split?
+ // - End of the split means that the only optional state is outer drop.
+ switch( new_state.tag() )
+ {
+ case VarState::TAGDEAD: throw "";
+ case VarState::TAG_Invalid:
+ case VarState::TAG_Valid:
+ case VarState::TAG_Optional:
+ if( is_enum ) {
+ for(size_t i = 0; i < ose.inner_states.size(); i ++)
+ {
+ merge_state(sp, builder, ::MIR::LValue::make_Downcast({ box$(lv.clone()), static_cast<unsigned int>(i) }), ose.inner_states[i], new_state);
+ }
}
else {
for(unsigned int i = 0; i < ose.inner_states.size(); i ++ )
@@ -911,17 +1355,16 @@ namespace
}
}
return ;
- case VarState::TAG_Optional: {
- //auto flag_idx = new_state.as_Optional();
- TODO(sp, "Handle Partial->Optional in split scope");
- } return;
+ case VarState::TAG_MovedOut:
+ BUG(sp, "Partial->MovedOut not valid");
case VarState::TAG_Partial: {
const auto& nse = new_state.as_Partial();
ASSERT_BUG(sp, ose.inner_states.size() == nse.inner_states.size(), "Partial->Partial with mismatched sizes - " << old_state << " <= " << new_state);
- ASSERT_BUG(sp, ose.outer_flag == nse.outer_flag, "Partial->Partial with mismatched drop flags - " << old_state << " <= " << new_state);
- if( is_box ) {
- ASSERT_BUG(sp, nse.inner_states.size() == 1, "");
- merge_state(sp, builder, ::MIR::LValue::make_Deref({ box$(lv.clone()) }), ose.inner_states[0], nse.inner_states[0]);
+ if( is_enum ) {
+ for(size_t i = 0; i < ose.inner_states.size(); i ++)
+ {
+ merge_state(sp, builder, ::MIR::LValue::make_Downcast({ box$(lv.clone()), static_cast<unsigned int>(i) }), ose.inner_states[i], nse.inner_states[i]);
+ }
}
else {
for(unsigned int i = 0; i < ose.inner_states.size(); i ++ )
@@ -983,11 +1426,11 @@ void MirBuilder::terminate_loop_early(const Span& sp, ScopeType::Data_Loop& sd_l
void MirBuilder::end_split_arm(const Span& sp, const ScopeHandle& handle, bool reachable)
{
- ASSERT_BUG(sp, handle.idx < m_scopes.size(), "");
+ ASSERT_BUG(sp, handle.idx < m_scopes.size(), "Handle passed to end_split_arm is invalid");
auto& sd = m_scopes.at( handle.idx );
- ASSERT_BUG(sp, sd.data.is_Split(), "");
+ ASSERT_BUG(sp, sd.data.is_Split(), "Ending split arm on non-Split arm - " << sd.data.tag_str());
auto& sd_split = sd.data.as_Split();
- ASSERT_BUG(sp, !sd_split.arms.empty(), "");
+ ASSERT_BUG(sp, !sd_split.arms.empty(), "Split arm list is empty (impossible)");
TRACE_FUNCTION_F("end split scope " << handle.idx << " arm " << (sd_split.arms.size()-1));
if( reachable )
@@ -1092,10 +1535,10 @@ void MirBuilder::complete_scope(ScopeDef& sd)
TU_MATCHA( (sd.data), (e),
(Temporaries,
- DEBUG("Temporaries " << e.temporaries);
+ DEBUG("Temporaries - " << e.temporaries);
),
(Variables,
- DEBUG("Variables " << e.vars);
+ DEBUG("Variables - " << e.vars);
),
(Loop,
DEBUG("Loop");
@@ -1282,37 +1725,62 @@ void MirBuilder::with_val_type(const Span& sp, const ::MIR::LValue& val, ::std::
BUG(sp, "Downcast on unexpected type - " << ty);
),
(Path,
- //ASSERT_BUG(sp, !te.binding.is_Unbound(), "Unbound path " << ty << " encountered");
- ASSERT_BUG(sp, te.binding.is_Enum(), "Downcast on non-Enum - " << ty << " for " << val);
- const auto& enm = *te.binding.as_Enum();
- const auto& variants = enm.m_variants;
- ASSERT_BUG(sp, e.variant_index < variants.size(), "Variant index out of range");
- const auto& variant = variants[e.variant_index];
- // TODO: Make data variants refer to associated types (unify enum and struct handling)
- TU_MATCHA( (variant.second), (ve),
- (Value,
- ),
- (Unit,
- ),
- (Tuple,
- // HACK! Create tuple.
- ::std::vector< ::HIR::TypeRef> tys;
- for(const auto& fld : ve)
- tys.push_back( monomorphise_type(sp, enm.m_params, te.path.m_data.as_Generic().m_params, fld.ent) );
- ::HIR::TypeRef tup( mv$(tys) );
- m_resolve.expand_associated_types(sp, tup);
- cb(tup);
- ),
- (Struct,
- // HACK! Create tuple.
- ::std::vector< ::HIR::TypeRef> tys;
- for(const auto& fld : ve)
- tys.push_back( monomorphise_type(sp, enm.m_params, te.path.m_data.as_Generic().m_params, fld.second.ent) );
- ::HIR::TypeRef tup( mv$(tys) );
- m_resolve.expand_associated_types(sp, tup);
- cb(tup);
+ // TODO: Union?
+ if( const auto* pbe = te.binding.opt_Enum() )
+ {
+ const auto& enm = **pbe;
+ const auto& variants = enm.m_variants;
+ ASSERT_BUG(sp, e.variant_index < variants.size(), "Variant index out of range");
+ const auto& variant = variants[e.variant_index];
+ // TODO: Make data variants refer to associated types (unify enum and struct handling)
+ TU_MATCHA( (variant.second), (ve),
+ (Value,
+ DEBUG("");
+ cb(::HIR::TypeRef::new_unit());
+ ),
+ (Unit,
+ cb(::HIR::TypeRef::new_unit());
+ ),
+ (Tuple,
+ // HACK! Create tuple.
+ ::std::vector< ::HIR::TypeRef> tys;
+ for(const auto& fld : ve)
+ tys.push_back( monomorphise_type(sp, enm.m_params, te.path.m_data.as_Generic().m_params, fld.ent) );
+ ::HIR::TypeRef tup( mv$(tys) );
+ m_resolve.expand_associated_types(sp, tup);
+ cb(tup);
+ ),
+ (Struct,
+ // HACK! Create tuple.
+ ::std::vector< ::HIR::TypeRef> tys;
+ for(const auto& fld : ve)
+ tys.push_back( monomorphise_type(sp, enm.m_params, te.path.m_data.as_Generic().m_params, fld.second.ent) );
+ ::HIR::TypeRef tup( mv$(tys) );
+ m_resolve.expand_associated_types(sp, tup);
+ cb(tup);
+ )
)
- )
+ }
+ else if( const auto* pbe = te.binding.opt_Union() )
+ {
+ const auto& unm = **pbe;
+ ASSERT_BUG(sp, e.variant_index < unm.m_variants.size(), "Variant index out of range");
+ const auto& variant = unm.m_variants.at(e.variant_index);
+ const auto& fld = variant.second;
+
+ if( monomorphise_type_needed(fld.ent) ) {
+ auto sty = monomorphise_type(sp, unm.m_params, te.path.m_data.as_Generic().m_params, fld.ent);
+ m_resolve.expand_associated_types(sp, sty);
+ cb(sty);
+ }
+ else {
+ cb(fld.ent);
+ }
+ }
+ else
+ {
+ BUG(sp, "Downcast on non-Enum/Union - " << ty << " for " << val);
+ }
)
)
});
@@ -1327,185 +1795,380 @@ bool MirBuilder::lvalue_is_copy(const Span& sp, const ::MIR::LValue& val) const
DEBUG("[lvalue_is_copy] ty="<<ty);
rv = (m_resolve.type_is_copy(sp, ty) ? 2 : 1);
});
- assert(rv != 0);
+ ASSERT_BUG(sp, rv != 0, "Type for " << val << " can't be determined");
return rv == 2;
}
-const VarState& MirBuilder::get_variable_state(const Span& sp, unsigned int idx, unsigned int skip_count) const
+const VarState& MirBuilder::get_slot_state(const Span& sp, VarGroup ty, unsigned int idx, unsigned int skip_count/*=0*/) const
{
+ // 1. Find an applicable Split scope
for( auto scope_idx : ::reverse(m_scope_stack) )
{
const auto& scope_def = m_scopes.at(scope_idx);
TU_MATCH_DEF( ScopeType, (scope_def.data), (e),
(
),
+ (Temporaries,
+ if( ty == VarGroup::Temporary )
+ {
+ auto it = ::std::find(e.temporaries.begin(), e.temporaries.end(), idx);
+ if( it != e.temporaries.end() ) {
+ break ;
+ }
+ }
+ ),
(Variables,
- auto it = ::std::find(e.vars.begin(), e.vars.end(), idx);
- if( it != e.vars.end() ) {
- // If controlled by this block, exit early (won't find it elsewhere)
- break ;
+ if( ty == VarGroup::Variable )
+ {
+ auto it = ::std::find(e.vars.begin(), e.vars.end(), idx);
+ if( it != e.vars.end() ) {
+ // If controlled by this block, exit early (won't find it elsewhere)
+ break ;
+ }
}
),
(Split,
const auto& cur_arm = e.arms.back();
- auto it = cur_arm.var_states.find(idx);
- if( it != cur_arm.var_states.end() )
+ if( ty == VarGroup::Variable )
{
- if( ! skip_count -- )
+ auto it = cur_arm.var_states.find(idx);
+ if( it != cur_arm.var_states.end() )
{
- return it->second;
+ if( ! skip_count -- )
+ {
+ return it->second;
+ }
+ }
+ }
+ else if( ty == VarGroup::Temporary )
+ {
+ auto it = cur_arm.tmp_states.find(idx);
+ if( it != cur_arm.tmp_states.end() )
+ {
+ if( ! skip_count -- )
+ {
+ return it->second;
+ }
}
}
)
)
}
-
- ASSERT_BUG(sp, idx < m_variable_states.size(), "Variable " << idx << " out of range for state table");
- return m_variable_states[idx];
+ switch(ty)
+ {
+ case VarGroup::Return:
+ return m_return_state;
+ case VarGroup::Argument:
+ ASSERT_BUG(sp, idx < m_arg_states.size(), "Argument " << idx << " out of range for state table");
+ return m_arg_states.at(idx);
+ case VarGroup::Variable:
+ ASSERT_BUG(sp, idx < m_variable_states.size(), "Variable " << idx << " out of range for state table");
+ return m_variable_states[idx];
+ case VarGroup::Temporary:
+ ASSERT_BUG(sp, idx < m_temporary_states.size(), "Temporary " << idx << " out of range for state table");
+ return m_temporary_states[idx];
+ }
+ BUG(sp, "Fell off the end of get_slot_state");
}
-VarState& MirBuilder::get_variable_state_mut(const Span& sp, unsigned int idx)
+VarState& MirBuilder::get_slot_state_mut(const Span& sp, VarGroup ty, unsigned int idx)
{
VarState* ret = nullptr;
for( auto scope_idx : ::reverse(m_scope_stack) )
{
auto& scope_def = m_scopes.at(scope_idx);
- if( scope_def.data.is_Variables() )
+ if( const auto* e = scope_def.data.opt_Variables() )
{
- const auto& e = scope_def.data.as_Variables();
- auto it = ::std::find(e.vars.begin(), e.vars.end(), idx);
- if( it != e.vars.end() ) {
- break ;
+ if( ty == VarGroup::Variable )
+ {
+ auto it = ::std::find(e->vars.begin(), e->vars.end(), idx);
+ if( it != e->vars.end() ) {
+ break ;
+ }
+ }
+ }
+ else if( const auto* e = scope_def.data.opt_Temporaries() )
+ {
+ if( ty == VarGroup::Temporary )
+ {
+ auto it = ::std::find(e->temporaries.begin(), e->temporaries.end(), idx);
+ if( it != e->temporaries.end() ) {
+ break ;
+ }
}
}
else if( scope_def.data.is_Split() )
{
+ auto& e = scope_def.data.as_Split();
+ auto& cur_arm = e.arms.back();
if( ! ret )
{
- auto& e = scope_def.data.as_Split();
- auto& cur_arm = e.arms.back();
- auto it = cur_arm.var_states.find(idx);
- if( it == cur_arm.var_states.end() )
+ ::std::map<unsigned int, VarState>* states;
+ switch(ty)
{
- DEBUG("Split new (scope " << scope_idx << ")");
- ret = &(cur_arm.var_states[idx] = get_variable_state(sp, idx).clone());
+ case VarGroup::Return: states = nullptr; break;
+ case VarGroup::Argument: BUG(sp, "Mutating state of argument"); break;
+ case VarGroup::Variable: states = &cur_arm.var_states; break;
+ case VarGroup::Temporary: states = &cur_arm.tmp_states; break;
}
- else
+
+ if( states )
{
- DEBUG("Split existing (scope " << scope_idx << ")");
- ret = &it->second;
+ auto it = states->find(idx);
+ if( it == states->end() )
+ {
+ DEBUG("Split new (scope " << scope_idx << ")");
+ ret = &( (*states)[idx] = get_slot_state(sp, ty, idx).clone() );
+ }
+ else
+ {
+ DEBUG("Split existing (scope " << scope_idx << ")");
+ ret = &it->second;
+ }
}
}
}
else if( scope_def.data.is_Loop() )
{
auto& e = scope_def.data.as_Loop();
- if( e.changed_vars.count(idx) == 0 )
+ ::std::map<unsigned int, VarState>* states = nullptr;
+ switch(ty)
{
- auto state = e.exit_state_valid ? get_variable_state(sp, idx).clone() : VarState::make_Valid({});
- e.changed_vars.insert(::std::make_pair( idx, mv$(state) ));
+ case VarGroup::Return: states = nullptr; break;
+ case VarGroup::Argument: BUG(sp, "Mutating state of argument"); break;
+ case VarGroup::Variable: states = &e.changed_vars; break;
+ case VarGroup::Temporary: states = &e.changed_tmps; break;
+ }
+
+ if( states )
+ {
+ if( states->count(idx) == 0 )
+ {
+ auto state = e.exit_state_valid ? get_slot_state(sp, ty, idx).clone() : VarState::make_Valid({});
+ states->insert(::std::make_pair( idx, mv$(state) ));
+ }
}
}
else
{
}
}
-
- if( !ret )
+ if( ret )
{
- DEBUG("Outer");
- ASSERT_BUG(sp, idx < m_variable_states.size(), "Variable " << idx << " out of range for state table");
- return m_variable_states[idx];
+ return *ret;
}
else
{
- return *ret;
+ switch(ty)
+ {
+ case VarGroup::Return:
+ return m_return_state;
+ case VarGroup::Argument:
+ ASSERT_BUG(sp, idx < m_arg_states.size(), "Argument " << idx << " out of range for state table");
+ return m_arg_states.at(idx);
+ case VarGroup::Variable:
+ ASSERT_BUG(sp, idx < m_variable_states.size(), "Variable " << idx << " out of range for state table");
+ return m_variable_states[idx];
+ case VarGroup::Temporary:
+ ASSERT_BUG(sp, idx < m_temporary_states.size(), "Temporary " << idx << " out of range for state table");
+ return m_temporary_states[idx];
+ }
+ BUG(sp, "Fell off the end of get_slot_state_mut");
}
}
+const VarState& MirBuilder::get_variable_state(const Span& sp, unsigned int idx, unsigned int skip_count) const
+{
+ return get_slot_state(sp, VarGroup::Variable, idx, skip_count);
+}
+VarState& MirBuilder::get_variable_state_mut(const Span& sp, unsigned int idx)
+{
+ return get_slot_state_mut(sp, VarGroup::Variable, idx);
+}
const VarState& MirBuilder::get_temp_state(const Span& sp, unsigned int idx, unsigned int skip_count) const
{
- for( auto scope_idx : ::reverse(m_scope_stack) )
- {
- const auto& scope_def = m_scopes.at(scope_idx);
- if( scope_def.data.is_Temporaries() )
+ return get_slot_state(sp, VarGroup::Temporary, idx, skip_count);
+}
+VarState& MirBuilder::get_temp_state_mut(const Span& sp, unsigned int idx)
+{
+ return get_slot_state_mut(sp, VarGroup::Temporary, idx);
+}
+
+const VarState& MirBuilder::get_val_state(const Span& sp, const ::MIR::LValue& lv, unsigned int skip_count)
+{
+ TODO(sp, "");
+}
+VarState& MirBuilder::get_val_state_mut(const Span& sp, const ::MIR::LValue& lv)
+{
+ TRACE_FUNCTION_F(lv);
+ TU_MATCHA( (lv), (e),
+ (Variable,
+ return get_slot_state_mut(sp, VarGroup::Variable, e);
+ ),
+ (Temporary,
+ return get_slot_state_mut(sp, VarGroup::Temporary, e.idx);
+ ),
+ (Argument,
+ return get_slot_state_mut(sp, VarGroup::Argument, e.idx);
+ ),
+ (Static,
+ BUG(sp, "Attempting to mutate state of a static");
+ ),
+ (Return,
+ BUG(sp, "Move of return value");
+ return get_slot_state_mut(sp, VarGroup::Return, 0);
+ ),
+ (Field,
+ auto& ivs = get_val_state_mut(sp, *e.val);
+ VarState tpl;
+ TU_MATCHA( (ivs), (ivse),
+ (Invalid,
+ //BUG(sp, "Mutating inner state of an invalidated composite - " << lv);
+ tpl = VarState::make_Valid({});
+ ),
+ (MovedOut,
+ BUG(sp, "Field on value with MovedOut state - " << lv);
+ ),
+ (Partial,
+ ),
+ (Optional,
+ tpl = ivs.clone();
+ ),
+ (Valid,
+ tpl = VarState::make_Valid({});
+ )
+ )
+ if( !ivs.is_Partial() )
{
- const auto& e = scope_def.data.as_Temporaries();
- auto it = ::std::find(e.temporaries.begin(), e.temporaries.end(), idx);
- if( it != e.temporaries.end() ) {
- break ;
- }
+ size_t n_flds = 0;
+ with_val_type(sp, *e.val, [&](const auto& ty) {
+ DEBUG("ty = " << ty);
+ if(const auto* e = ty.m_data.opt_Path()) {
+ ASSERT_BUG(sp, e->binding.is_Struct(), "");
+ const auto& str = *e->binding.as_Struct();
+ TU_MATCHA( (str.m_data), (se),
+ (Unit,
+ BUG(sp, "Field access of unit-like struct");
+ ),
+ (Tuple,
+ n_flds = se.size();
+ ),
+ (Named,
+ n_flds = se.size();
+ )
+ )
+ }
+ else if(const auto* e = ty.m_data.opt_Tuple()) {
+ n_flds = e->size();
+ }
+ else if(const auto* e = ty.m_data.opt_Array()) {
+ n_flds = e->size_val;
+ }
+ else {
+ TODO(sp, "Determine field count for " << ty);
+ }
+ });
+ ::std::vector<VarState> inner_vs; inner_vs.reserve(n_flds);
+ for(size_t i = 0; i < n_flds; i++)
+ inner_vs.push_back( tpl.clone() );
+ ivs = VarState::make_Partial({ mv$(inner_vs) });
}
- else if( scope_def.data.is_Split() )
+ return ivs.as_Partial().inner_states.at(e.field_index);
+ ),
+ (Deref,
+ // HACK: If the dereferenced type is a Box ("owned_box") then hack in move and shallow drop
+ bool is_box = false;
+ if( this->m_lang_Box )
{
- const auto& e = scope_def.data.as_Split();
- const auto& cur_arm = e.arms.back();
- auto it = cur_arm.tmp_states.find(idx);
- if( it != cur_arm.tmp_states.end() )
- {
- if( ! skip_count -- )
- {
- return it->second;
- }
- }
+ with_val_type(sp, *e.val, [&](const auto& ty){
+ DEBUG("ty = " << ty);
+ is_box = this->is_type_owned_box(ty);
+ });
}
- }
- ASSERT_BUG(sp, idx < m_temporary_states.size(), "Temporary " << idx << " out of range for state table");
- return m_temporary_states[idx];
-}
-VarState& MirBuilder::get_temp_state_mut(const Span& sp, unsigned int idx)
-{
- VarState* ret = nullptr;
- for( auto scope_idx : ::reverse(m_scope_stack) )
- {
- auto& scope_def = m_scopes.at(scope_idx);
- if( scope_def.data.is_Temporaries() )
+
+ if( is_box )
{
- const auto& e = scope_def.data.as_Temporaries();
- auto it = ::std::find(e.temporaries.begin(), e.temporaries.end(), idx);
- if( it != e.temporaries.end() ) {
- break ;
+ ::MIR::LValue inner_lv;
+ // 1. If the inner lvalue isn't a slot with move information, move out of the lvalue into a temporary (with standard temp scope)
+ TU_MATCH_DEF( ::MIR::LValue, (*e.val), (ei),
+ (
+ with_val_type(sp, *e.val, [&](const auto& ty){ inner_lv = this->new_temporary(ty); });
+ this->push_stmt_assign(sp, inner_lv.clone(), ::MIR::RValue( mv$(*e.val) ));
+ *e.val = inner_lv.clone();
+ ),
+ (Variable,
+ inner_lv = ::MIR::LValue(ei);
+ ),
+ (Temporary,
+ inner_lv = ::MIR::LValue(ei);
+ ),
+ (Argument,
+ inner_lv = ::MIR::LValue(ei);
+ )
+ )
+ // 2. Mark the slot as requiring only a shallow drop
+ ::std::vector<VarState> inner;
+ inner.push_back(VarState::make_Valid({}));
+ auto& ivs = get_val_state_mut(sp, inner_lv);
+ if( ! ivs.is_MovedOut() )
+ {
+ unsigned int drop_flag = (ivs.is_Optional() ? ivs.as_Optional() : ~0u);
+ ivs = VarState::make_MovedOut({ box$(VarState::make_Valid({})), drop_flag });
}
+ return *ivs.as_MovedOut().inner_state;
}
- else if( scope_def.data.is_Split() )
+ else
{
- if( ! ret )
- {
- auto& e = scope_def.data.as_Split();
- auto& cur_arm = e.arms.back();
- auto it = cur_arm.tmp_states.find(idx);
- if(it == cur_arm.tmp_states.end())
+ BUG(sp, "Move out of deref with non-Copy values - &move? - " << lv << " : " << FMT_CB(ss, this->with_val_type(sp, lv, [&](const auto& ty){ss<<ty;});) );
+ }
+ ),
+ (Index,
+ BUG(sp, "Move out of index with non-Copy values - Partial move?");
+ ),
+ (Downcast,
+ // TODO: What if the inner is Copy? What if the inner is a hidden pointer?
+ auto& ivs = get_val_state_mut(sp, *e.val);
+ //static VarState ivs; ivs = VarState::make_Valid({});
+
+ if( !ivs.is_Partial() )
+ {
+ ASSERT_BUG(sp, !ivs.is_MovedOut(), "Downcast of a MovedOut value");
+
+ size_t var_count = 0;
+ with_val_type(sp, *e.val, [&](const auto& ty){
+ DEBUG("ty = " << ty);
+ ASSERT_BUG(sp, ty.m_data.is_Path(), "Downcast on non-Path type - " << ty);
+ const auto& pb = ty.m_data.as_Path().binding;
+ // TODO: What about unions?
+ // - Iirc, you can't move out of them so they will never have state mutated
+ if( pb.is_Enum() )
{
- ret = &(cur_arm.tmp_states[idx] = get_temp_state(sp, idx).clone());
+ const auto& enm = *pb.as_Enum();
+ var_count = enm.m_variants.size();
+ }
+ else if( const auto* pbe = pb.opt_Union() )
+ {
+ const auto& unm = **pbe;
+ var_count = unm.m_variants.size();
}
else
{
- ret = &it->second;
+ BUG(sp, "Downcast on non-Enum/Union - " << ty);
}
- }
- }
- else if( scope_def.data.is_Loop() )
- {
- auto& e = scope_def.data.as_Loop();
- if( e.changed_tmps.count(idx) == 0 )
+ });
+
+ ::std::vector<VarState> inner;
+ for(size_t i = 0; i < var_count; i ++)
{
- auto state = e.exit_state_valid ? get_temp_state(sp, idx).clone() : VarState::make_Valid({});
- e.changed_tmps.insert(::std::make_pair( idx, mv$(state) ));
+ inner.push_back( VarState::make_Invalid(InvalidType::Uninit) );
}
+ inner[e.variant_index] = mv$(ivs);
+ ivs = VarState::make_Partial({ mv$(inner) });
}
- else
- {
- }
- }
- if( !ret )
- {
- ASSERT_BUG(sp, idx < m_temporary_states.size(), "Temporary " << idx << " out of range for state table");
- return m_temporary_states[idx];
- }
- else
- {
- return *ret;
- }
+ return ivs.as_Partial().inner_states.at(e.variant_index);
+ )
+ )
+ BUG(sp, "Fell off send of get_val_state_mut");
}
void MirBuilder::drop_value_from_state(const Span& sp, const VarState& vs, ::MIR::LValue lv)
@@ -1516,12 +2179,41 @@ void MirBuilder::drop_value_from_state(const Span& sp, const VarState& vs, ::MIR
(Valid,
push_stmt_drop(sp, mv$(lv));
),
+ (MovedOut,
+ bool is_box = false;
+ with_val_type(sp, lv, [&](const auto& ty){
+ is_box = this->is_type_owned_box(ty);
+ });
+ if( is_box )
+ {
+ drop_value_from_state(sp, *vse.inner_state, ::MIR::LValue::make_Deref({ box$(lv.clone()) }));
+ push_stmt_drop_shallow(sp, mv$(lv), vse.outer_flag);
+ }
+ else
+ {
+ TODO(sp, "");
+ }
+ ),
(Partial,
- // TODO: Actual destructuring based on the type
- with_val_type(sp, lv, [&](const auto& ty){ ASSERT_BUG(sp, this->is_type_owned_box(ty), "TODO: Partial on non-Box"); });
- assert( vse.inner_states.size() == 1 );
- drop_value_from_state(sp, vse.inner_states[0], ::MIR::LValue::make_Deref({ box$(lv.clone()) }));
- push_stmt_drop_shallow(sp, mv$(lv), vse.outer_flag);
+ bool is_enum = false;
+ with_val_type(sp, lv, [&](const auto& ty){
+ is_enum = ty.m_data.is_Path() && ty.m_data.as_Path().binding.is_Enum();
+ });
+ if(is_enum)
+ {
+ DEBUG("TODO: Switch based on enum value");
+ //for(size_t i = 0; i < vse.inner_states.size(); i ++)
+ //{
+ // drop_value_from_state(sp, vse.inner_states[i], ::MIR::LValue::make_Downcast({ box$(lv.clone()), static_cast<unsigned int>(i) }));
+ //}
+ }
+ else
+ {
+ for(size_t i = 0; i < vse.inner_states.size(); i ++)
+ {
+ drop_value_from_state(sp, vse.inner_states[i], ::MIR::LValue::make_Field({ box$(lv.clone()), static_cast<unsigned int>(i) }));
+ }
+ }
),
(Optional,
push_stmt_drop(sp, mv$(lv), vse);
@@ -1536,6 +2228,7 @@ void MirBuilder::drop_scope_values(const ScopeDef& sd)
for(auto tmp_idx : ::reverse(e.temporaries))
{
const auto& vs = get_temp_state(sd.span, tmp_idx);
+ DEBUG("tmp" << tmp_idx << " - " << vs);
drop_value_from_state( sd.span, vs, ::MIR::LValue::make_Temporary({ tmp_idx }) );
}
),
@@ -1543,6 +2236,7 @@ void MirBuilder::drop_scope_values(const ScopeDef& sd)
for(auto var_idx : ::reverse(e.vars))
{
const auto& vs = get_variable_state(sd.span, var_idx);
+ DEBUG("var" << var_idx << " - " << vs);
drop_value_from_state( sd.span, vs, ::MIR::LValue::make_Variable(var_idx) );
}
),
@@ -1555,109 +2249,13 @@ void MirBuilder::drop_scope_values(const ScopeDef& sd)
)
}
+
void MirBuilder::moved_lvalue(const Span& sp, const ::MIR::LValue& lv)
{
- TRACE_FUNCTION_F(lv);
- TU_MATCHA( (lv), (e),
- (Variable,
- if( !lvalue_is_copy(sp, lv) ) {
- get_variable_state_mut(sp, e) = VarState::make_Invalid(InvalidType::Moved);
- }
- ),
- (Temporary,
- if( !lvalue_is_copy(sp, lv) ) {
- get_temp_state_mut(sp, e.idx) = VarState::make_Invalid(InvalidType::Moved);
- }
- ),
- (Argument,
- //TODO(sp, "Mark argument as moved");
- ),
- (Static,
- //TODO(sp, "Static - Assert that type is Copy");
- ),
- (Return,
- BUG(sp, "Read of return value");
- ),
- (Field,
- if( lvalue_is_copy(sp, lv) ) {
- }
- else {
- // TODO: Partial moves.
- moved_lvalue(sp, *e.val);
- }
- ),
- (Deref,
- if( lvalue_is_copy(sp, lv) ) {
- }
- else {
- // HACK: If the dereferenced type is a Box ("owned_box") then hack in move and shallow drop
- if( this->m_lang_Box )
- {
- bool is_box = false;
- with_val_type(sp, *e.val, [&](const auto& ty){
- DEBUG("ty = " << ty);
- is_box = this->is_type_owned_box(ty);
- });
- if( is_box )
- {
- ::MIR::LValue inner_lv;
- // 1. If the inner lvalue isn't a slot with move information, move out of the lvalue into a temporary (with standard temp scope)
- TU_MATCH_DEF( ::MIR::LValue, (*e.val), (ei),
- (
- with_val_type(sp, *e.val, [&](const auto& ty){ inner_lv = this->new_temporary(ty); });
- this->push_stmt_assign(sp, inner_lv.clone(), ::MIR::RValue( mv$(*e.val) ));
- *e.val = inner_lv.clone();
- ),
- (Variable,
- inner_lv = ::MIR::LValue(ei);
- ),
- (Temporary,
- inner_lv = ::MIR::LValue(ei);
- ),
- (Argument,
- inner_lv = ::MIR::LValue(ei);
- )
- )
- // 2. Mark the slot as requiring only a shallow drop
- // - TODO: Have a drop flag attached to the
- ::std::vector<VarState> ivs;
- ivs.push_back(VarState::make_Invalid(InvalidType::Moved));
- TU_MATCH_DEF( ::MIR::LValue, (inner_lv), (ei),
- (
- BUG(sp, "Box move out of invalid LValue " << inner_lv << " - should have been moved");
- ),
- (Variable,
- get_variable_state_mut(sp, ei) = VarState::make_Partial({ mv$(ivs), ~0u });
- ),
- (Temporary,
- get_temp_state_mut(sp, ei.idx) = VarState::make_Partial({ mv$(ivs), ~0u });
- ),
- (Argument,
- TODO(sp, "Mark arg " << ei.idx << " for shallow drop");
- )
- )
- // Early return!
- return ;
- }
- }
- BUG(sp, "Move out of deref with non-Copy values - &move? - " << lv << " : " << FMT_CB(ss, this->with_val_type(sp, lv, [&](const auto& ty){ss<<ty;});) );
- moved_lvalue(sp, *e.val);
- }
- ),
- (Index,
- if( lvalue_is_copy(sp, lv) ) {
- }
- else {
- BUG(sp, "Move out of index with non-Copy values - Partial move?");
- moved_lvalue(sp, *e.val);
- }
- moved_lvalue(sp, *e.idx);
- ),
- (Downcast,
- // TODO: What if the inner is Copy? What if the inner is a hidden pointer?
- moved_lvalue(sp, *e.val);
- )
- )
+ if( !lvalue_is_copy(sp, lv) ) {
+ auto& vs = get_val_state_mut(sp, lv);
+ vs = VarState::make_Invalid(InvalidType::Moved);
+ }
}
const ::MIR::LValue& MirBuilder::get_ptr_to_dst(const Span& sp, const ::MIR::LValue& lv) const
@@ -1702,12 +2300,15 @@ VarState VarState::clone() const
(Optional,
return VarState(e);
),
+ (MovedOut,
+ return VarState::make_MovedOut({ box$(e.inner_state->clone()), e.outer_flag });
+ ),
(Partial,
::std::vector<VarState> n;
n.reserve(e.inner_states.size());
for(const auto& a : e.inner_states)
n.push_back( a.clone() );
- return VarState::make_Partial({ mv$(n), e.outer_flag });
+ return VarState::make_Partial({ mv$(n) });
)
)
throw "";
@@ -1726,9 +2327,12 @@ bool VarState::operator==(VarState& x) const
(Optional,
return te == xe;
),
- (Partial,
+ (MovedOut,
if( te.outer_flag != xe.outer_flag )
return false;
+ return *te.inner_state == *xe.inner_state;
+ ),
+ (Partial,
if( te.inner_states.size() != xe.inner_states.size() )
return false;
for(unsigned int i = 0; i < te.inner_states.size(); i ++)
@@ -1758,12 +2362,16 @@ bool VarState::operator==(VarState& x) const
(Optional,
os << "Optional(" << e << ")";
),
- (Partial,
- os << "Partial(";
+ (MovedOut,
+ os << "MovedOut(";
if( e.outer_flag == ~0u )
os << "-";
else
os << "df" << e.outer_flag;
+ os << " " << *e.inner_state <<")";
+ ),
+ (Partial,
+ os << "Partial(";
os << ", [" << e.inner_states << "])";
)
)
diff --git a/src/mir/operations.hpp b/src/mir/operations.hpp
index cdc9c00b..1c06bc8c 100644
--- a/src/mir/operations.hpp
+++ b/src/mir/operations.hpp
@@ -10,6 +10,8 @@
// Check that the MIR is well-formed
extern void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path, const ::MIR::Function& fcn, const ::HIR::Function::args_t& args, const ::HIR::TypeRef& ret_type);
+// -
+extern void MIR_Validate_Full(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path, const ::MIR::Function& fcn, const ::HIR::Function::args_t& args, const ::HIR::TypeRef& ret_type);
// Perform needed changes to the generated MIR (virtualisation, Unsize/CoerceUnsize, ...)
extern void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path, ::MIR::Function& fcn, const ::HIR::Function::args_t& args, const ::HIR::TypeRef& ret_type);
// Optimise the MIR
diff --git a/src/mir/optimise.cpp b/src/mir/optimise.cpp
index cb96cba0..d7379526 100644
--- a/src/mir/optimise.cpp
+++ b/src/mir/optimise.cpp
@@ -13,6 +13,16 @@
#include <mir/helpers.hpp>
#include <mir/operations.hpp>
#include <mir/visit_crate_mir.hpp>
+#include <algorithm>
+#include <iomanip>
+#include <trans/target.hpp>
+
+#define DUMP_BEFORE_ALL 0
+#define DUMP_BEFORE_CONSTPROPAGATE 0
+#define CHECK_AFTER_PASS 0
+
+#define DUMP_AFTER_DONE 0
+#define CHECK_AFTER_DONE 1
namespace {
::MIR::BasicBlockId get_new_target(const ::MIR::TypeResolve& state, ::MIR::BasicBlockId bb)
@@ -182,6 +192,8 @@ namespace {
(Drop,
// Well, it mutates...
rv |= visit_mir_lvalue_mut(e.slot, ValUsage::Write, cb);
+ ),
+ (ScopeEnd,
)
)
return rv;
@@ -269,7 +281,7 @@ namespace {
),
(UfcsKnown,
TRACE_FUNCTION_F(path);
-
+
// Obtain trait pointer (for default impl and to know what the item type is)
const auto& trait_ref = state.m_resolve.m_crate.get_trait_by_path(state.sp, pe.trait.m_path);
auto trait_vi_it = trait_ref.m_values.find(pe.item);
@@ -316,7 +328,7 @@ namespace {
}
return false;
});
-
+
if( bound_found ) {
return nullptr;
}
@@ -330,6 +342,7 @@ namespace {
params.self_ty = &*pe.type;
params.fcn_params = &pe.params;
+ // Search for the method in the impl
auto fit = impl.m_methods.find(pe.item);
if( fit != impl.m_methods.end() )
{
@@ -424,35 +437,46 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
while( MIR_Optimise_PropagateSingleAssignments(state, fcn) )
change_happened = true;
+ change_happened |= MIR_Optimise_UnifyBlocks(state, fcn);
+
// >> Unify duplicate temporaries
// If two temporaries don't overlap in lifetime (blocks in which they're valid), unify the two
change_happened |= MIR_Optimise_UnifyTemporaries(state, fcn);
// >> Combine Duplicate Blocks
change_happened |= MIR_Optimise_UnifyBlocks(state, fcn);
- #if 0
if( change_happened )
{
- //MIR_Dump_Fcn(::std::cout, fcn);
+ #if DUMP_AFTER_PASS
+ if( debug_enabled() ) {
+ MIR_Dump_Fcn(::std::cout, fcn);
+ }
+ #endif
+ #if CHECK_AFTER_PASS
MIR_Validate(resolve, path, fcn, args, ret_type);
+ #endif
}
- #endif
MIR_Optimise_GarbageCollect_Partial(state, fcn);
pass_num += 1;
} while( change_happened );
- #if 1
+ #if DUMP_AFTER_DONE
if( debug_enabled() ) {
MIR_Dump_Fcn(::std::cout, fcn);
}
#endif
+ #if CHECK_AFTER_DONE
// DEFENCE: Run validation _before_ GC (so validation errors refer to the pre-gc numbers)
MIR_Validate(resolve, path, fcn, args, ret_type);
+ #endif
// GC pass on blocks and variables
// - Find unused blocks, then delete and rewrite all references.
MIR_Optimise_GarbageCollect(state, fcn);
+
+ //MIR_Validate(resolve, path, fcn, args, ret_type);
+ //MIR_Validate_Full(resolve, path, fcn, args, ret_type);
}
// --------------------------------------------------------------------
@@ -463,6 +487,31 @@ bool MIR_Optimise_BlockSimplify(::MIR::TypeResolve& state, ::MIR::Function& fcn)
// >> Replace targets that point to a block that is just a goto
for(auto& block : fcn.blocks)
{
+ // Unify sequential ScopeEnd statements
+ if( block.statements.size() > 1 )
+ {
+ for(auto it = block.statements.begin() + 1; it != block.statements.end(); )
+ {
+ if( (it-1)->is_ScopeEnd() && it->is_ScopeEnd() )
+ {
+ auto& dst = (it-1)->as_ScopeEnd();
+ const auto& src = it->as_ScopeEnd();
+ DEBUG("Unify " << *(it-1) << " and " << *it);
+ for(auto v : src.vars)
+ dst.vars.push_back(v);
+ for(auto v : src.tmps)
+ dst.tmps.push_back(v);
+ ::std::sort(dst.vars.begin(), dst.vars.end());
+ ::std::sort(dst.tmps.begin(), dst.tmps.end());
+ it = block.statements.erase(it);
+ }
+ else
+ {
+ ++ it;
+ }
+ }
+ }
+
TU_MATCHA( (block.terminator), (e),
(Incomplete,
),
@@ -583,7 +632,7 @@ bool MIR_Optimise_BlockSimplify(::MIR::TypeResolve& state, ::MIR::Function& fcn)
bool MIR_Optimise_Inlining(::MIR::TypeResolve& state, ::MIR::Function& fcn)
{
TRACE_FUNCTION;
-
+
struct H
{
static bool can_inline(const ::HIR::Path& path, const ::MIR::Function& fcn)
@@ -680,22 +729,21 @@ bool MIR_Optimise_Inlining(::MIR::TypeResolve& state, ::MIR::Function& fcn)
return rv;
}
- ::MIR::BasicBlock clone_bb(const ::MIR::BasicBlock& src) const
+ ::MIR::BasicBlock clone_bb(const ::MIR::BasicBlock& src, unsigned src_idx, unsigned new_idx) const
{
::MIR::BasicBlock rv;
rv.statements.reserve( src.statements.size() );
for(const auto& stmt : src.statements)
{
+ DEBUG("BB" << src_idx << "->BB" << new_idx << "/" << rv.statements.size() << ": " << stmt);
TU_MATCHA( (stmt), (se),
(Assign,
- DEBUG(se.dst << " = " << se.src);
rv.statements.push_back( ::MIR::Statement::make_Assign({
this->clone_lval(se.dst),
this->clone_rval(se.src)
}) );
),
(Asm,
- DEBUG("asm!");
rv.statements.push_back( ::MIR::Statement::make_Asm({
se.tpl,
this->clone_name_lval_vec(se.outputs),
@@ -705,7 +753,6 @@ bool MIR_Optimise_Inlining(::MIR::TypeResolve& state, ::MIR::Function& fcn)
}) );
),
(SetDropFlag,
- DEBUG("df" << se.idx << " = ");
rv.statements.push_back( ::MIR::Statement::make_SetDropFlag({
this->df_base + se.idx,
se.new_val,
@@ -713,17 +760,27 @@ bool MIR_Optimise_Inlining(::MIR::TypeResolve& state, ::MIR::Function& fcn)
}) );
),
(Drop,
- DEBUG("drop " << se.slot);
rv.statements.push_back( ::MIR::Statement::make_Drop({
se.kind,
this->clone_lval(se.slot),
se.flag_idx == ~0u ? ~0u : this->df_base + se.flag_idx
}) );
+ ),
+ (ScopeEnd,
+ ::MIR::Statement::Data_ScopeEnd new_se;
+ new_se.vars.reserve(se.vars.size());
+ for(auto idx : se.vars)
+ new_se.vars.push_back(this->var_base + idx);
+ new_se.tmps.reserve(se.tmps.size());
+ for(auto idx : se.tmps)
+ new_se.tmps.push_back(this->tmp_base + idx);
+ rv.statements.push_back(::MIR::Statement( mv$(new_se) ));
)
)
}
- DEBUG(src.terminator);
+ DEBUG("BB" << src_idx << "->BB" << new_idx << "/" << rv.statements.size() << ": " << src.terminator);
rv.terminator = this->clone_term(src.terminator);
+ DEBUG("-> " << rv.terminator);
return rv;
}
::MIR::Terminator clone_term(const ::MIR::Terminator& src) const
@@ -975,7 +1032,7 @@ bool MIR_Optimise_Inlining(::MIR::TypeResolve& state, ::MIR::Function& fcn)
new_blocks.reserve( called_mir->blocks.size() );
for(const auto& bb : called_mir->blocks)
{
- new_blocks.push_back( cloner.clone_bb(bb) );
+ new_blocks.push_back( cloner.clone_bb(bb, (&bb - called_mir->blocks.data()), fcn.blocks.size() + new_blocks.size()) );
}
// > Append new temporaries
for(auto& val : cloner.const_assignments)
@@ -1000,6 +1057,7 @@ bool MIR_Optimise_Inlining(::MIR::TypeResolve& state, ::MIR::Function& fcn)
return inline_happened;
}
+
// --------------------------------------------------------------------
// If two temporaries don't overlap in lifetime (blocks in which they're valid), unify the two
// --------------------------------------------------------------------
@@ -1030,249 +1088,9 @@ bool MIR_Optimise_UnifyTemporaries(::MIR::TypeResolve& state, ::MIR::Function& f
return false;
}
- struct VarLifetime {
- ::std::vector<bool> blocks;
-
- VarLifetime(const ::MIR::Function& fcn):
- blocks(fcn.blocks.size())
- {
- }
-
- bool is_valid() const {
- for(auto v : blocks)
- if( v )
- return true;
- return false;
- }
- bool overlaps(const VarLifetime& x) const {
- assert(blocks.size() == x.blocks.size());
- for(unsigned int i = 0; i < blocks.size(); i ++)
- {
- if( blocks[i] && x.blocks[i] )
- return true;
- }
- return false;
- }
- void unify(const VarLifetime& x) {
- assert(blocks.size() == x.blocks.size());
- for(unsigned int i = 0; i < blocks.size(); i ++)
- {
- if( x.blocks[i] )
- blocks[i] = true;
- }
- }
- };
- //::std::vector<VarLifetime> var_lifetimes;
- ::std::vector<VarLifetime> tmp_lifetimes( fcn.temporaries.size(), VarLifetime(fcn) );
-
- // 1. Calculate lifetimes of all variables/temporaries that are eligable to be merged
- // - Lifetime is from first write to last read. Borrows lead to the value being assumed to live forever
- // - > BUT: Since this is lazy, it's taken as only being the lifetime of non-Copy items (as determined by the drop call or a move)
- {
- auto mark_borrowed = [&](const ::MIR::LValue& lv) {
- if( const auto* ve = lv.opt_Temporary() ) {
- replacable[ve->idx] = false;
- }
- // TODO: Recurse!
- };
-
- struct State {
- //::std::vector<bool> vars;
- ::std::vector<bool> tmps;
-
- State() {}
- State(const ::MIR::Function& fcn):
- tmps(fcn.temporaries.size())
- {
- }
-
- bool merge(const State& other) {
- if( tmps.size() == 0 )
- {
- assert(other.tmps.size() != 0);
- tmps = other.tmps;
- return true;
- }
- else
- {
- assert(tmps.size() == other.tmps.size());
- bool rv = false;
- for(unsigned int i = 0; i < tmps.size(); i ++)
- {
- if( tmps[i] != other.tmps[i] && other.tmps[i] ) {
- tmps[i] = true;
- rv = true;
- }
- }
- return rv;
- }
- }
-
- void mark_validity(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& lv, bool val) {
- if( const auto& ve = lv.opt_Temporary() ) {
- tmps[ve->idx] = val;
- }
- else {
- }
- }
- void move_val(const ::MIR::TypeResolve& mir_res, const ::MIR::LValue& lv) {
- ::HIR::TypeRef tmp;
- if( mir_res.m_resolve.type_is_copy( mir_res.sp, mir_res.get_lvalue_type(tmp, lv) ) ) {
- }
- else {
- mark_validity(mir_res, lv, false);
- }
- }
- void move_val(const ::MIR::TypeResolve& mir_res, const ::MIR::Param& p) {
- if(const auto* e = p.opt_LValue())
- {
- move_val(mir_res, *e);
- }
- }
- };
- ::std::vector<State> block_states( fcn.blocks.size() );
- ::std::vector< ::std::pair<unsigned int, State> > to_visit;
- auto add_to_visit = [&to_visit](unsigned int bb, State state) {
- to_visit.push_back( ::std::make_pair(bb, mv$(state)) );
- };
- to_visit.push_back( ::std::make_pair(0, State(fcn)) );
- while( !to_visit.empty() )
- {
- auto bb_idx = to_visit.back().first;
- auto val_state = mv$(to_visit.back().second);
- to_visit.pop_back();
-
- // 1. Merge with block state
- if( ! block_states[bb_idx].merge(val_state) )
- continue ;
- //DEBUG("BB" << bb_idx);
-
- // 2. Run block
- const auto& bb = fcn.blocks[bb_idx];
- for(unsigned int stmt_idx = 0; stmt_idx < bb.statements.size(); stmt_idx ++)
- {
- const auto& stmt = bb.statements[stmt_idx];
- state.set_cur_stmt(bb_idx, stmt_idx);
-
- switch( stmt.tag() )
- {
- case ::MIR::Statement::TAGDEAD:
- throw "";
- case ::MIR::Statement::TAG_SetDropFlag:
- break;
- case ::MIR::Statement::TAG_Drop:
- val_state.mark_validity( state, stmt.as_Drop().slot, false );
- break;
- case ::MIR::Statement::TAG_Asm:
- for(const auto& v : stmt.as_Asm().outputs)
- val_state.mark_validity( state, v.second, true );
- break;
- case ::MIR::Statement::TAG_Assign:
- // Check source (and invalidate sources)
- TU_MATCH( ::MIR::RValue, (stmt.as_Assign().src), (se),
- (Use,
- val_state.move_val(state, se);
- ),
- (Constant,
- ),
- (SizedArray,
- val_state.move_val(state, se.val);
- ),
- (Borrow,
- mark_borrowed(se.val);
- ),
- (Cast,
- ),
- (BinOp,
- ),
- (UniOp,
- ),
- (DstMeta,
- ),
- (DstPtr,
- ),
- (MakeDst,
- val_state.move_val(state, se.meta_val);
- ),
- (Tuple,
- for(const auto& v : se.vals)
- val_state.move_val(state, v);
- ),
- (Array,
- for(const auto& v : se.vals)
- val_state.move_val(state, v);
- ),
- (Variant,
- val_state.move_val(state, se.val);
- ),
- (Struct,
- for(const auto& v : se.vals)
- val_state.move_val(state, v);
- )
- )
- // Mark destination as valid
- val_state.mark_validity( state, stmt.as_Assign().dst, true );
- break;
- }
- block_states[bb_idx].merge(val_state);
- }
-
- // 3. During terminator, merge again
- state.set_cur_stmt_term(bb_idx);
- //DEBUG("- " << bb.terminator);
- TU_MATCH(::MIR::Terminator, (bb.terminator), (e),
- (Incomplete,
- // Should be impossible here.
- ),
- (Return,
- block_states[bb_idx].merge(val_state);
- ),
- (Diverge,
- ),
- (Goto,
- block_states[bb_idx].merge(val_state);
- // Push block with the new state
- add_to_visit( e, mv$(val_state) );
- ),
- (Panic,
- // What should be done here?
- ),
- (If,
- // Push blocks
- block_states[bb_idx].merge(val_state);
- add_to_visit( e.bb0, val_state );
- add_to_visit( e.bb1, mv$(val_state) );
- ),
- (Switch,
- block_states[bb_idx].merge(val_state);
- for(const auto& tgt : e.targets)
- {
- add_to_visit( tgt, val_state );
- }
- ),
- (Call,
- for(const auto& arg : e.args)
- val_state.move_val( state, arg );
- block_states[bb_idx].merge(val_state);
- // Push blocks (with return valid only in one)
- add_to_visit(e.panic_block, val_state);
-
- // TODO: If the function returns !, don't follow the ret_block
- val_state.mark_validity( state, e.ret_val, true );
- add_to_visit(e.ret_block, mv$(val_state));
- )
- )
- }
-
- // Convert block states into temp states
- for(unsigned int bb_idx = 0; bb_idx < block_states.size(); bb_idx ++)
- {
- for(unsigned int tmp_idx = 0; tmp_idx < block_states[bb_idx].tmps.size(); tmp_idx ++)
- {
- tmp_lifetimes[tmp_idx].blocks[bb_idx] = block_states[bb_idx].tmps[tmp_idx];
- }
- }
- }
+ auto lifetimes = MIR_Helper_GetLifetimes(state, fcn, /*dump_debug=*/true);
+ //::std::vector<::MIR::ValueLifetime> var_lifetimes = mv$(lifetimes.m_variables);
+ ::std::vector<::MIR::ValueLifetime> tmp_lifetimes = mv$(lifetimes.m_temporaries);
// 2. Unify variables of the same type with distinct non-overlapping lifetimes
::std::map<unsigned int, unsigned int> replacements;
@@ -1282,7 +1100,7 @@ bool MIR_Optimise_UnifyTemporaries(::MIR::TypeResolve& state, ::MIR::Function& f
{
if( ! replacable[tmpidx] ) continue ;
if( visited[tmpidx] ) continue ;
- if( ! tmp_lifetimes[tmpidx].is_valid() ) continue ;
+ if( ! tmp_lifetimes[tmpidx].is_used() ) continue ;
visited[tmpidx] = true;
for(unsigned int i = tmpidx+1; i < fcn.temporaries.size(); i ++)
@@ -1291,11 +1109,12 @@ bool MIR_Optimise_UnifyTemporaries(::MIR::TypeResolve& state, ::MIR::Function& f
continue ;
if( fcn.temporaries[i] != fcn.temporaries[tmpidx] )
continue ;
- if( ! tmp_lifetimes[i].is_valid() ) continue ;
+ if( ! tmp_lifetimes[i].is_used() )
+ continue ;
// Variables are of the same type, check if they overlap
if( tmp_lifetimes[tmpidx].overlaps( tmp_lifetimes[i] ) )
continue ;
- // They overlap, unify
+ // They don't overlap, unify
tmp_lifetimes[tmpidx].unify( tmp_lifetimes[i] );
replacements[i] = tmpidx;
replacement_needed = true;
@@ -1318,6 +1137,8 @@ bool MIR_Optimise_UnifyTemporaries(::MIR::TypeResolve& state, ::MIR::Function& f
}
return false;
});
+
+ // TODO: Replace in ScopeEnd too?
}
return replacement_needed;
@@ -1370,6 +1191,12 @@ bool MIR_Optimise_UnifyBlocks(::MIR::TypeResolve& state, ::MIR::Function& fcn)
return false;
if( ae.slot != be.slot )
return false;
+ ),
+ (ScopeEnd,
+ if( ae.vars != be.vars )
+ return false;
+ if( ae.tmps == be.tmps )
+ return false;
)
)
}
@@ -1462,7 +1289,7 @@ bool MIR_Optimise_UnifyBlocks(::MIR::TypeResolve& state, ::MIR::Function& fcn)
if( ! replacements.empty() )
{
//MIR_TODO(state, "Unify blocks - " << replacements);
- DEBUG("Unify blocks - " << replacements);
+ DEBUG("Unify blocks (old: new) - " << replacements);
auto patch_tgt = [&replacements](::MIR::BasicBlockId& tgt) {
auto it = replacements.find(tgt);
if( it != replacements.end() )
@@ -1524,6 +1351,9 @@ bool MIR_Optimise_UnifyBlocks(::MIR::TypeResolve& state, ::MIR::Function& fcn)
// --------------------------------------------------------------------
bool MIR_Optimise_ConstPropagte(::MIR::TypeResolve& state, ::MIR::Function& fcn)
{
+#if DUMP_BEFORE_ALL || DUMP_BEFORE_CONSTPROPAGATE
+ if( debug_enabled() ) MIR_Dump_Fcn(::std::cout, fcn);
+#endif
bool changed = false;
TRACE_FUNCTION_FR("", changed);
@@ -1538,26 +1368,28 @@ bool MIR_Optimise_ConstPropagte(::MIR::TypeResolve& state, ::MIR::Function& fcn)
const auto& tef = te.fcn.as_Intrinsic();
if( tef.name == "size_of" )
{
- //size_t size_val = 0;
- //if( Target_GetSizeOf(tef.params.m_types.at(0), size_val) )
- //{
- // bb.statements.push_back(::MIR::Statement::make_Assign({ mv$(te.ret_val), ::MIR::Constant::make_Uint(size_val) }));
- // bb.terminator = ::MIR::Terminator::make_Goto(te.ret_block);
- // changed = true;
- //}
+ size_t size_val = 0;
+ if( Target_GetSizeOf(state.sp, tef.params.m_types.at(0), size_val) )
+ {
+ auto val = ::MIR::Constant::make_Uint({ size_val, ::HIR::CoreType::Usize });
+ bb.statements.push_back(::MIR::Statement::make_Assign({ mv$(te.ret_val), mv$(val) }));
+ bb.terminator = ::MIR::Terminator::make_Goto(te.ret_block);
+ changed = true;
+ }
}
else if( tef.name == "align_of" )
{
- //size_t size_val = 0;
- //if( Target_GetAlignOf(tef.params.m_types.at(0), size_val) )
- //{
- // bb.statements.push_back(::MIR::Statement::make_Assign({ mv$(te.ret_val), ::MIR::Constant::make_Uint(size_val) }));
- // bb.terminator = ::MIR::Terminator::make_Goto(te.ret_block);
- // changed = true;
- //}
+ size_t align_val = 0;
+ if( Target_GetAlignOf(state.sp, tef.params.m_types.at(0), align_val) )
+ {
+ auto val = ::MIR::Constant::make_Uint({ align_val, ::HIR::CoreType::Usize });
+ bb.statements.push_back(::MIR::Statement::make_Assign({ mv$(te.ret_val), mv$(val) }));
+ bb.terminator = ::MIR::Terminator::make_Goto(te.ret_block);
+ changed = true;
+ }
}
- // NOTE: Quick special-case for bswap<u8> (a no-op)
- else if( tef.name == "bswap" && tef.params.m_types.at(0) == ::HIR::CoreType::U8 )
+ // NOTE: Quick special-case for bswap<u8/i8> (a no-op)
+ else if( tef.name == "bswap" && (tef.params.m_types.at(0) == ::HIR::CoreType::U8 || tef.params.m_types.at(0) == ::HIR::CoreType::I8) )
{
DEBUG("bswap<u8> is a no-op");
if( auto* e = te.args.at(0).opt_LValue() )
@@ -1567,6 +1399,14 @@ bool MIR_Optimise_ConstPropagte(::MIR::TypeResolve& state, ::MIR::Function& fcn)
bb.terminator = ::MIR::Terminator::make_Goto(te.ret_block);
changed = true;
}
+ else if( tef.name == "mrustc_slice_len" )
+ {
+ MIR_ASSERT(state, te.args.at(0).is_LValue(), "Argument to `get_dst_meta` must be a lvalue");
+ auto& e = te.args.at(0).as_LValue();
+ bb.statements.push_back(::MIR::Statement::make_Assign({ mv$(te.ret_val), ::MIR::RValue::make_DstMeta({ mv$(e) }) }));
+ bb.terminator = ::MIR::Terminator::make_Goto(te.ret_block);
+ changed = true;
+ }
else
{
// Ignore any other intrinsics
@@ -1579,8 +1419,267 @@ bool MIR_Optimise_ConstPropagte(::MIR::TypeResolve& state, ::MIR::Function& fcn)
// > NOTE: No need to locally stitch blocks, next pass will do that
// TODO: Use ValState to do full constant propagation across blocks
- // 1. Remove based on known booleans within a single block
- // - Eliminates `if false`/`if true` branches
+ // Remove redundant temporaries and evaluate known binops
+ for(auto& bb : fcn.blocks)
+ {
+ auto bbidx = &bb - &fcn.blocks.front();
+
+ ::std::map< ::MIR::LValue, ::MIR::Constant > known_values;
+ ::std::map< unsigned, bool > known_drop_flags;
+
+ auto check_param = [&](::MIR::Param& p) {
+ if(const auto* pe = p.opt_LValue()) {
+ auto it = known_values.find(*pe);
+ if( it != known_values.end() )
+ {
+ DEBUG(state << "Value " << *pe << " known to be " << it->second);
+ p = it->second.clone();
+ }
+ }
+ };
+
+ for(auto& stmt : bb.statements)
+ {
+ auto stmtidx = &stmt - &bb.statements.front();
+ state.set_cur_stmt(bbidx, stmtidx);
+ // Scan statements forwards:
+ // - If a known temporary is used as Param::LValue, replace LValue with the value
+ // - If a UniOp has its input known, evaluate
+ // - If a BinOp has both values known, evaluate
+ if( auto* e = stmt.opt_Assign() )
+ {
+ TU_MATCHA( (e->src), (se),
+ (Use,
+ auto it = known_values.find(se);
+ if( it != known_values.end() )
+ {
+ DEBUG(state << "Value " << se << " known to be" << it->second);
+ e->src = it->second.clone();
+ }
+ ),
+ (Constant,
+ // Ignore (knowledge done below)
+ ),
+ (SizedArray,
+ check_param(se.val);
+ ),
+ (Borrow,
+ ),
+ (Cast,
+ ),
+ (BinOp,
+ check_param(se.val_l);
+ check_param(se.val_r);
+
+ if( se.val_l.is_Constant() && se.val_r.is_Constant() )
+ {
+ const auto& val_l = se.val_l.as_Constant();
+ const auto& val_r = se.val_r.as_Constant();
+
+ ::MIR::Constant new_value;
+ bool replace = false;
+ switch(se.op)
+ {
+ case ::MIR::eBinOp::EQ:
+ if( val_l.is_Const() )
+ ;
+ else
+ {
+ replace = true;
+ new_value = ::MIR::Constant::make_Bool({val_l == val_r});
+ }
+ break;
+ case ::MIR::eBinOp::NE:
+ if( val_l.is_Const() )
+ ;
+ else
+ {
+ replace = true;
+ new_value = ::MIR::Constant::make_Bool({val_l != val_r});
+ }
+ break;
+ // TODO: Other binary operations
+ default:
+ break;
+ }
+
+ if( replace )
+ {
+ DEBUG(state << " " << e->src << " = " << new_value);
+ e->src = mv$(new_value);
+ }
+ }
+ ),
+ (UniOp,
+ auto it = known_values.find(se.val);
+ if( it != known_values.end() )
+ {
+ const auto& val = it->second;
+ ::MIR::Constant new_value;
+ bool replace = false;
+ // TODO: Evaluate UniOp
+ switch( se.op )
+ {
+ case ::MIR::eUniOp::INV:
+ TU_MATCHA( (val), (ve),
+ (Uint,
+ auto val = ve.v;
+ switch(ve.t)
+ {
+ case ::HIR::CoreType::U8: val = (~val) & 0xFF; break;
+ case ::HIR::CoreType::U16: val = (~val) & 0xFFFF; break;
+ case ::HIR::CoreType::U32: val = (~val) & 0xFFFFFFFF; break;
+ case ::HIR::CoreType::Usize:
+ case ::HIR::CoreType::U64:
+ val = ~val;
+ break;
+ case ::HIR::CoreType::U128:
+ val = ~val;
+ break;
+ case ::HIR::CoreType::Char:
+ MIR_BUG(state, "Invalid use of ! on char");
+ break;
+ default:
+ // Invalid type for Uint literal
+ break;
+ }
+ new_value = ::MIR::Constant::make_Uint({ val, ve.t });
+ replace = true;
+ ),
+ (Int,
+ // Is ! valid on Int?
+ ),
+ (Float,
+ // Not valid?
+ ),
+ (Bool,
+ new_value = ::MIR::Constant::make_Bool({ !ve.v });
+ replace = true;
+ ),
+ (Bytes, ),
+ (StaticString, ),
+ (Const,
+ // TODO:
+ ),
+ (ItemAddr,
+ )
+ )
+ break;
+ case ::MIR::eUniOp::NEG:
+ TU_MATCHA( (val), (ve),
+ (Uint,
+ // Not valid?
+ ),
+ (Int,
+ new_value = ::MIR::Constant::make_Int({ -ve.v, ve.t });
+ replace = true;
+ ),
+ (Float,
+ new_value = ::MIR::Constant::make_Float({ -ve.v, ve.t });
+ replace = true;
+ ),
+ (Bool,
+ // Not valid?
+ ),
+ (Bytes, ),
+ (StaticString, ),
+ (Const,
+ // TODO:
+ ),
+ (ItemAddr,
+ )
+ )
+ break;
+ }
+ if( replace )
+ {
+ DEBUG(state << " " << e->src << " = " << new_value);
+ e->src = mv$(new_value);
+ }
+ }
+ ),
+ (DstMeta,
+ ),
+ (DstPtr,
+ ),
+ (MakeDst,
+ check_param(se.ptr_val);
+ check_param(se.meta_val);
+ ),
+ (Tuple,
+ for(auto& p : se.vals)
+ check_param(p);
+ ),
+ (Array,
+ for(auto& p : se.vals)
+ check_param(p);
+ ),
+ (Variant,
+ check_param(se.val);
+ ),
+ (Struct,
+ for(auto& p : se.vals)
+ check_param(p);
+ )
+ )
+ }
+ else if( const auto* se = stmt.opt_SetDropFlag() )
+ {
+ if( se->other == ~0u )
+ {
+ known_drop_flags.insert(::std::make_pair( se->idx, se->new_val ));
+ }
+ else
+ {
+ auto it = known_drop_flags.find(se->other);
+ if( it != known_drop_flags.end() )
+ {
+ known_drop_flags.insert(::std::make_pair( se->idx, se->new_val ^ it->second ));
+ }
+ }
+ }
+ else if( auto* se = stmt.opt_Drop() )
+ {
+ if( se->flag_idx != ~0u )
+ {
+ auto it = known_drop_flags.find(se->flag_idx);
+ if( it != known_drop_flags.end() )
+ {
+ if( it->second ) {
+ se->flag_idx = ~0u;
+ }
+ else {
+ // TODO: Delete drop
+ stmt = ::MIR::Statement::make_ScopeEnd({ });
+ }
+ }
+ }
+ }
+ // - If a known temporary is borrowed mutably or mutated somehow, clear its knowledge
+ visit_mir_lvalues(stmt, [&known_values](const ::MIR::LValue& lv, ValUsage vu)->bool {
+ if( vu == ValUsage::Write ) {
+ known_values.erase(lv);
+ }
+ return false;
+ });
+ // - Locate `temp = SOME_CONST` and record value
+ if( const auto* e = stmt.opt_Assign() )
+ {
+ if( e->dst.is_Temporary() || e->dst.is_Variable() )
+ {
+ if( const auto* ce = e->src.opt_Constant() )
+ {
+ known_values.insert(::std::make_pair( e->dst.clone(), ce->clone() ));
+ }
+ }
+ }
+ }
+
+ state.set_cur_stmt_term(bbidx);
+ }
+
+ // - Remove based on known booleans within a single block
+ // > Eliminates `if false`/`if true` branches
for(auto& bb : fcn.blocks)
{
auto bbidx = &bb - &fcn.blocks.front();
@@ -1612,12 +1711,15 @@ bool MIR_Optimise_ConstPropagte(::MIR::TypeResolve& state, ::MIR::Function& fcn)
if( se.dst != te.cond )
continue;
- if( !se.src.is_Constant() )
- continue;
- if( !se.src.as_Constant().is_Bool() )
- continue;
- val_known = true;
- known_val = se.src.as_Constant().as_Bool().v;
+ if( se.src.is_Constant() && se.src.as_Constant().is_Bool() )
+ {
+ val_known = true;
+ known_val = se.src.as_Constant().as_Bool().v;
+ }
+ else
+ {
+ val_known = false;
+ }
break;
}
else
@@ -1759,6 +1861,12 @@ bool MIR_Optimise_PropagateSingleAssignments(::MIR::TypeResolve& state, ::MIR::F
srcp = &*srcp->as_Field().val;
if( !( srcp->is_Temporary() || srcp->is_Variable() || srcp->is_Argument() ) )
continue ;
+
+ if( replacements.find(*srcp) != replacements.end() )
+ {
+ DEBUG("> Can't replace, source has pending replacement");
+ continue;
+ }
}
// TODO: Allow any rvalue, but that currently breaks due to chaining
//else if( e.src.is_Borrow() )
@@ -1785,6 +1893,7 @@ bool MIR_Optimise_PropagateSingleAssignments(::MIR::TypeResolve& state, ::MIR::F
for(unsigned int si2 = stmt_idx+1; si2 < block.statements.size(); si2 ++)
{
const auto& stmt2 = block.statements[si2];
+ DEBUG("[find usage] " << stmt2);
// Usage found.
if( visit_mir_lvalues(stmt2, is_lvalue_usage) )
@@ -1808,7 +1917,7 @@ bool MIR_Optimise_PropagateSingleAssignments(::MIR::TypeResolve& state, ::MIR::F
// Determine if source is mutated.
// > Assume that any mutating access of the root value counts (over-cautious)
- if( visit_mir_lvalues(block.statements[si2], [&](const auto& lv, auto vu){ return /*vu == ValUsage::Write &&*/ is_lvalue_in_val(lv); }) )
+ if( visit_mir_lvalues(stmt2, [&](const auto& lv, auto vu){ return /*vu == ValUsage::Write &&*/ is_lvalue_in_val(lv); }) )
{
stop = true;
break;
@@ -1816,7 +1925,7 @@ bool MIR_Optimise_PropagateSingleAssignments(::MIR::TypeResolve& state, ::MIR::F
}
if( !stop )
{
- DEBUG(block.terminator);
+ DEBUG("[find usage] " << block.terminator);
TU_MATCHA( (block.terminator), (e),
(Incomplete,
),
@@ -2212,11 +2321,11 @@ bool MIR_Optimise_GarbageCollect(::MIR::TypeResolve& state, ::MIR::Function& fcn
TU_IFLET( ::MIR::Statement, stmt, Assign, e,
assigned_lval(e.dst);
)
- else if( const auto* e = stmt.opt_Drop() )
- {
- if( e->flag_idx != ~0u )
- used_dfs.at(e->flag_idx) = true;
- }
+ //else if( const auto* e = stmt.opt_Drop() )
+ //{
+ // //if( e->flag_idx != ~0u )
+ // // used_dfs.at(e->flag_idx) = true;
+ //}
else if( const auto* e = stmt.opt_Asm() )
{
for(const auto& val : e->outputs)
@@ -2226,6 +2335,7 @@ bool MIR_Optimise_GarbageCollect(::MIR::TypeResolve& state, ::MIR::Function& fcn
{
if( e->other != ~0u )
used_dfs.at(e->other) = true;
+ used_dfs.at(e->idx) = true;
}
}
@@ -2275,11 +2385,22 @@ bool MIR_Optimise_GarbageCollect(::MIR::TypeResolve& state, ::MIR::Function& fcn
{
if( !used_temps[i] )
{
- DEBUG("GC Temporary(" << i << ")");
fcn.temporaries.erase(fcn.temporaries.begin() + j);
}
+ else {
+ DEBUG("tmp$" << i << " => tmp$" << j);
+ }
temp_rewrite_table.push_back( used_temps[i] ? j ++ : ~0u );
}
+ DEBUG("Deleted Temporaries:" << FMT_CB(ss,
+ for(auto run : runs(used_temps))
+ if( !used_temps[run.first] )
+ {
+ ss << " " << run.first;
+ if(run.second != run.first)
+ ss << "-" << run.second;
+ }
+ ));
::std::vector<unsigned int> var_rewrite_table;
unsigned int n_var = fcn.named_variables.size();
for(unsigned int i = 0, j = 0; i < n_var; i ++)
@@ -2289,6 +2410,9 @@ bool MIR_Optimise_GarbageCollect(::MIR::TypeResolve& state, ::MIR::Function& fcn
DEBUG("GC Variable(" << i << ")");
fcn.named_variables.erase(fcn.named_variables.begin() + j);
}
+ else {
+ DEBUG("var$" << i << " => var$" << j);
+ }
var_rewrite_table.push_back( used_vars[i] ? j ++ : ~0u );
}
::std::vector<unsigned int> df_rewrite_table;
@@ -2298,7 +2422,7 @@ bool MIR_Optimise_GarbageCollect(::MIR::TypeResolve& state, ::MIR::Function& fcn
if( !used_dfs[i] )
{
DEBUG("GC df" << i);
- fcn.drop_flags.erase(fcn.drop_flags.begin() + j);
+ // NOTE: Not erased until after rewriting
}
df_rewrite_table.push_back( used_dfs[i] ? j ++ : ~0u );
}
@@ -2329,27 +2453,75 @@ bool MIR_Optimise_GarbageCollect(::MIR::TypeResolve& state, ::MIR::Function& fcn
}
return false;
};
- for(auto stmt_it = it->statements.begin(); stmt_it != it->statements.end(); ++ stmt_it)
+ ::std::vector<bool> to_remove_statements(it->statements.size());
+ for(auto& stmt : it->statements)
{
- state.set_cur_stmt(i, stmt_it - it->statements.begin());
- visit_mir_lvalues_mut(*stmt_it, lvalue_cb);
- if( auto* se = stmt_it->opt_Drop() )
+ auto stmt_idx = &stmt - &it->statements.front();
+ state.set_cur_stmt(i, stmt_idx);
+ if( auto* se = stmt.opt_Drop() )
+ {
+ // If the drop flag was unset, either remove the drop or remove the drop flag reference
+ if( se->flag_idx != ~0u && df_rewrite_table[se->flag_idx] == ~0u)
+ {
+ if( fcn.drop_flags.at(se->flag_idx) ) {
+ DEBUG(state << "Remove flag from " << stmt);
+ se->flag_idx = ~0u;
+ }
+ else {
+ DEBUG(state << "Remove " << stmt);
+ to_remove_statements[stmt_idx] = true;
+ continue ;
+ }
+ }
+ }
+
+ visit_mir_lvalues_mut(stmt, lvalue_cb);
+ if( auto* se = stmt.opt_Drop() )
{
// Rewrite drop flag indexes
if( se->flag_idx != ~0u )
se->flag_idx = df_rewrite_table[se->flag_idx];
}
- else if( auto* se = stmt_it->opt_SetDropFlag() )
+ else if( auto* se = stmt.opt_SetDropFlag() )
{
// Rewrite drop flag indexes OR delete
if( df_rewrite_table[se->idx] == ~0u ) {
- stmt_it = it->statements.erase(stmt_it)-1;
+ to_remove_statements[stmt_idx] = true;
continue ;
}
se->idx = df_rewrite_table[se->idx];
if( se->other != ~0u )
se->other = df_rewrite_table[se->other];
}
+ else if( auto* se = stmt.opt_ScopeEnd() )
+ {
+ for(auto it = se->vars.begin(); it != se->vars.end(); )
+ {
+ if( var_rewrite_table.at(*it) == ~0u ) {
+ it = se->vars.erase(it);
+ }
+ else {
+ *it = var_rewrite_table.at(*it);
+ ++ it;
+ }
+ }
+ for(auto it = se->tmps.begin(); it != se->tmps.end(); )
+ {
+ if( temp_rewrite_table.at(*it) == ~0u ) {
+ it = se->tmps.erase(it);
+ }
+ else {
+ *it = temp_rewrite_table.at(*it);
+ ++ it;
+ }
+ }
+
+ if( se->vars.empty() && se->tmps.empty() ) {
+ DEBUG(state << "Delete ScopeEnd (now empty)");
+ to_remove_statements[stmt_idx] = true;
+ continue ;
+ }
+ }
}
state.set_cur_stmt_term(i);
// Rewrite and advance
@@ -2387,10 +2559,34 @@ bool MIR_Optimise_GarbageCollect(::MIR::TypeResolve& state, ::MIR::Function& fcn
)
)
+ // Delete all statements flagged in a bitmap for deletion
+ auto stmt_it = it->statements.begin();
+ for(auto flag : to_remove_statements)
+ {
+ if(flag) {
+ stmt_it = it->statements.erase(stmt_it);
+ }
+ else {
+ ++ stmt_it;
+ }
+ }
+
++it;
}
}
+ for(unsigned int i = 0, j = 0; i < n_df; i ++)
+ {
+ if( !used_dfs[i] )
+ {
+ fcn.drop_flags.erase(fcn.drop_flags.begin() + j);
+ }
+ else
+ {
+ j ++;
+ }
+ }
+
// TODO: Detect if any optimisations happened, and return true in that case
return false;
}
diff --git a/src/parse/expr.cpp b/src/parse/expr.cpp
index 6227dca4..6dd9bd75 100644
--- a/src/parse/expr.cpp
+++ b/src/parse/expr.cpp
@@ -119,6 +119,16 @@ ExprNodeP Parse_ExprBlockLine_WithItems(TokenStream& lex, ::std::shared_ptr<AST:
Parse_Mod_Item(lex, *local_mod, mv$(item_attrs));
return ExprNodeP();
}
+
+ if( tok.type() == TOK_MACRO && tok.str() == "macro_rules" )
+ {
+ // Special case - create a local module if macro_rules! is seen
+ // - Allows correct scoping of defined macros
+ if( !local_mod ) {
+ local_mod = lex.parse_state().get_current_mod().add_anon();
+ }
+ }
+
switch(tok.type())
{
// Items:
@@ -669,6 +679,9 @@ bool Parse_IsTokValue(eTokenType tok_type)
case TOK_PAREN_OPEN:
case TOK_SQUARE_OPEN:
+ case TOK_INTERPOLATED_PATH:
+ case TOK_INTERPOLATED_EXPR:
+
case TOK_MACRO:
case TOK_PIPE:
diff --git a/src/parse/lex.cpp b/src/parse/lex.cpp
index caa640f9..3c4bd795 100644
--- a/src/parse/lex.cpp
+++ b/src/parse/lex.cpp
@@ -860,9 +860,13 @@ uint32_t Lexer::parseEscape(char enclosing)
case '\n':
while( ch.isspace() )
ch = this->getc();
- this->ungetc();
- if( ch == enclosing )
+ if(ch == '\\' )
+ return parseEscape(enclosing);
+ else if( ch == enclosing )
+ {
+ this->ungetc();
return ~0;
+ }
else
return ch.v;
default:
@@ -1009,12 +1013,12 @@ bool Codepoint::isxdigit() const {
s += (char)(0xC0 | ((cp.v >> 6) & 0x1F));
s += (char)(0x80 | ((cp.v >> 0) & 0x3F));
}
- else if( cp.v <= (0x0F+1)<<(2*6) ) {
+ else if( cp.v < (0x0F+1)<<(2*6) ) {
s += (char)(0xE0 | ((cp.v >> 12) & 0x0F));
s += (char)(0x80 | ((cp.v >> 6) & 0x3F));
s += (char)(0x80 | ((cp.v >> 0) & 0x3F));
}
- else if( cp.v <= (0x07+1)<<(3*6) ) {
+ else if( cp.v < (0x07+1)<<(3*6) ) {
s += (char)(0xF0 | ((cp.v >> 18) & 0x07));
s += (char)(0x80 | ((cp.v >> 12) & 0x3F));
s += (char)(0x80 | ((cp.v >> 6) & 0x3F));
@@ -1034,12 +1038,12 @@ bool Codepoint::isxdigit() const {
os << (char)(0xC0 | ((cp.v >> 6) & 0x1F));
os << (char)(0x80 | ((cp.v >> 0) & 0x3F));
}
- else if( cp.v <= (0x0F+1)<<(2*6) ) {
+ else if( cp.v < (0x0F+1)<<(2*6) ) {
os << (char)(0xE0 | ((cp.v >> 12) & 0x0F));
os << (char)(0x80 | ((cp.v >> 6) & 0x3F));
os << (char)(0x80 | ((cp.v >> 0) & 0x3F));
}
- else if( cp.v <= (0x07+1)<<(2*6) ) {
+ else if( cp.v < (0x07+1)<<(3*6) ) {
os << (char)(0xF0 | ((cp.v >> 18) & 0x07));
os << (char)(0x80 | ((cp.v >> 12) & 0x3F));
os << (char)(0x80 | ((cp.v >> 6) & 0x3F));
diff --git a/src/parse/root.cpp b/src/parse/root.cpp
index aefd5a13..d40e1f95 100644
--- a/src/parse/root.cpp
+++ b/src/parse/root.cpp
@@ -44,13 +44,13 @@ AST::MetaItem Parse_MetaItem(TokenStream& lex);
void Parse_ModRoot(TokenStream& lex, AST::Module& mod, AST::MetaItems& mod_attrs);
//::AST::Path Parse_Publicity(TokenStream& lex)
-bool Parse_Publicity(TokenStream& lex)
+bool Parse_Publicity(TokenStream& lex, bool allow_restricted=true)
{
Token tok;
if( LOOK_AHEAD(lex) == TOK_RWORD_PUB )
{
GET_TOK(tok, lex);
- if( LOOK_AHEAD(lex) == TOK_PAREN_OPEN )
+ if( allow_restricted && LOOK_AHEAD(lex) == TOK_PAREN_OPEN )
{
auto path = AST::Path("", {});
// Restricted publicity.
@@ -541,7 +541,7 @@ AST::Struct Parse_Struct(TokenStream& lex, const AST::MetaItems& meta_items)
SET_ATTRS(lex, item_attrs);
PUTBACK(tok, lex);
- bool is_pub = Parse_Publicity(lex);
+ bool is_pub = Parse_Publicity(lex, /*allow_restricted=*/false); // HACK: Disable `pub(restricted)` syntax in tuple structs, due to ambiguity
refs.push_back( AST::TupleItem( mv$(item_attrs), is_pub, Parse_Type(lex) ) );
if( GET_TOK(tok, lex) != TOK_COMMA )
@@ -1336,7 +1336,6 @@ void Parse_Use(TokenStream& lex, ::std::function<void(AST::UseStmt, ::std::strin
{
GET_CHECK_TOK(tok, lex, TOK_STRING);
path = ::AST::Path(tok.str(), {});
- GET_CHECK_TOK(tok, lex, TOK_DOUBLE_COLON);
}
else {
PUTBACK(tok, lex);
@@ -1387,7 +1386,7 @@ void Parse_Use(TokenStream& lex, ::std::function<void(AST::UseStmt, ::std::strin
else
{
PUTBACK(tok, lex);
- assert(path.nodes().size() > 0);
+ ASSERT_BUG(lex.getPosition(), path.nodes().size() > 0, "`use` with no path");
name = path.nodes().back().name();
}
diff --git a/src/parse/token.cpp b/src/parse/token.cpp
index c7d11d03..05c4dbe6 100644
--- a/src/parse/token.cpp
+++ b/src/parse/token.cpp
@@ -289,16 +289,16 @@ struct EscapedString {
case TOK_NEWLINE: return "\n";
case TOK_WHITESPACE: return " ";
case TOK_COMMENT: return "/*" + m_data.as_String() + "*/";
- case TOK_INTERPOLATED_TYPE: return "/*:ty*/";
- case TOK_INTERPOLATED_PATH: return "/*:path*/";
- case TOK_INTERPOLATED_PATTERN: return "/*:pat*/";
+ case TOK_INTERPOLATED_TYPE: return FMT( *reinterpret_cast<const ::TypeRef*>(m_data.as_Fragment()) );
+ case TOK_INTERPOLATED_PATH: return FMT( *reinterpret_cast<const ::AST::Path*>(m_data.as_Fragment()) );
+ case TOK_INTERPOLATED_PATTERN: return FMT( *reinterpret_cast<const ::AST::Pattern*>(m_data.as_Fragment()) );
+ case TOK_INTERPOLATED_STMT:
+ case TOK_INTERPOLATED_BLOCK:
case TOK_INTERPOLATED_EXPR: {
::std::stringstream ss;
reinterpret_cast<const ::AST::ExprNode*>(m_data.as_Fragment())->print(ss);
return ss.str();
}
- case TOK_INTERPOLATED_STMT: return "/*:stmt*/";
- case TOK_INTERPOLATED_BLOCK: return "/*:block*/";
case TOK_INTERPOLATED_META: return "/*:meta*/";
case TOK_INTERPOLATED_ITEM: return "/*:item*/";
case TOK_INTERPOLATED_IDENT: return "/*:ident*/";
diff --git a/src/trans/codegen.cpp b/src/trans/codegen.cpp
index 6a581d93..9d536181 100644
--- a/src/trans/codegen.cpp
+++ b/src/trans/codegen.cpp
@@ -160,8 +160,7 @@ void Trans_Codegen(const ::std::string& outfile, const TransOptions& opt, const
auto mir = Trans_Monomorphise(resolve, pp, fcn.m_code.m_mir);
MIR_Validate(resolve, ::HIR::ItemPath(""), *mir, args, ret_type);
MIR_Cleanup(resolve, ::HIR::ItemPath(""), *mir, args, ret_type);
- // TODO: MIR Optimisation
- //MIR_Optimise(resolve, ::HIR::ItemPath(""), *mir, args, ret_type);
+ MIR_Optimise(resolve, ::HIR::ItemPath(""), *mir, args, ret_type);
MIR_Validate(resolve, ::HIR::ItemPath(""), *mir, args, ret_type);
codegen->emit_function_code(path, fcn, ent.second->pp, mir);
}
diff --git a/src/trans/codegen_c.cpp b/src/trans/codegen_c.cpp
index 5e365ceb..2d7a521d 100644
--- a/src/trans/codegen_c.cpp
+++ b/src/trans/codegen_c.cpp
@@ -65,7 +65,7 @@ namespace {
<< "typedef struct { } tTYPEID;\n"
<< "typedef struct { void* PTR; size_t META; } SLICE_PTR;\n"
<< "typedef struct { void* PTR; void* META; } TRAITOBJ_PTR;\n"
- << "typedef struct { size_t size; size_t align; } VTABLE_HDR;\n"
+ << "typedef struct { size_t size; size_t align; void (*drop)(void*); } VTABLE_HDR;\n"
<< "\n"
<< "extern void _Unwind_Resume(void) __attribute__((noreturn));\n"
<< "\n"
@@ -84,7 +84,7 @@ namespace {
<< "\treturn (v >> 32 != 0 ? __builtin_clz(v>>32) : 32 + __builtin_clz(v));\n"
<< "}\n"
<< "static inline uint64_t __builtin_ctz64(uint64_t v) {\n"
- << "\treturn (v&0xFFFFFFFF == 0 ? __builtin_ctz(v>>32) + 32 : __builtin_ctz(v));\n"
+ << "\treturn ((v&0xFFFFFFFF) == 0 ? __builtin_ctz(v>>32) + 32 : __builtin_ctz(v));\n"
<< "}\n"
<< "static inline unsigned __int128 __builtin_bswap128(unsigned __int128 v) {\n"
<< "\tuint64_t lo = __builtin_bswap64((uint64_t)v);\n"
@@ -95,9 +95,11 @@ namespace {
<< "\treturn (v >> 64 != 0 ? __builtin_clz64(v>>64) : 64 + __builtin_clz64(v));\n"
<< "}\n"
<< "static inline unsigned __int128 __builtin_ctz128(unsigned __int128 v) {\n"
- << "\treturn (v&0xFFFFFFFFFFFFFFFF == 0 ? __builtin_ctz64(v>>64) + 64 : __builtin_ctz64(v));\n"
+ << "\treturn ((v&0xFFFFFFFFFFFFFFFF) == 0 ? __builtin_ctz64(v>>64) + 64 : __builtin_ctz64(v));\n"
<< "}\n"
<< "\n"
+ << "static inline void noop_drop(void *p) {}\n"
+ << "\n"
;
}
@@ -134,6 +136,7 @@ namespace {
::std::vector<::std::string> tmp;
::std::vector<const char*> args;
args.push_back( getenv("CC") ? getenv("CC") : "gcc" );
+ args.push_back("-ffunction-sections");
args.push_back("-pthread");
switch(opt.opt_level)
{
@@ -189,6 +192,7 @@ namespace {
::std::stringstream cmd_ss;
for(const auto& arg : args)
{
+ // TODO: use a formatter specific to shell escaping
cmd_ss << "\"" << FmtEscaped(arg) << "\" ";
}
DEBUG("- " << cmd_ss.str());
@@ -310,6 +314,21 @@ namespace {
}
m_of << "} "; emit_ctype(ty); m_of << ";\n";
}
+
+ auto drop_glue_path = ::HIR::Path(ty.clone(), "#drop_glue");
+ auto args = ::std::vector< ::std::pair<::HIR::Pattern,::HIR::TypeRef> >();
+ auto ty_ptr = ::HIR::TypeRef::new_pointer(::HIR::BorrowType::Owned, ty.clone());
+ ::MIR::TypeResolve mir_res { sp, m_resolve, FMT_CB(ss, ss << drop_glue_path;), ty_ptr, args, *(::MIR::Function*)nullptr };
+ m_mir_res = &mir_res;
+ m_of << "void " << Trans_Mangle(drop_glue_path) << "("; emit_ctype(ty); m_of << "* rv) {";
+ auto self = ::MIR::LValue::make_Deref({ box$(::MIR::LValue::make_Return({})) });
+ auto fld_lv = ::MIR::LValue::make_Field({ box$(self), 0 });
+ for(const auto& ity : te)
+ {
+ emit_destructor_call(fld_lv, ity, /*unsized_valid=*/false);
+ fld_lv.as_Field().field_index ++;
+ }
+ m_of << "}\n";
)
else TU_IFLET( ::HIR::TypeRef::Data, ty.m_data, Function, te,
emit_type_fn(ty);
@@ -406,7 +425,7 @@ namespace {
::std::vector< ::std::pair<::HIR::Pattern,::HIR::TypeRef> > args;
if( item.m_markings.has_drop_impl ) {
- m_of << "tUNIT " << Trans_Mangle( ::HIR::Path(struct_ty.clone(), m_resolve.m_lang_Drop, "drop") ) << "(struct s_" << Trans_Mangle(p) << "*rv);\n";
+ m_of << "tUNIT " << Trans_Mangle( ::HIR::Path(struct_ty.clone(), m_resolve.m_lang_Drop, "drop") ) << "("; emit_ctype(struct_ty_ptr, FMT_CB(ss, ss << "rv";)); m_of << ");\n";
}
else if( m_resolve.is_type_owned_box(struct_ty) )
{
@@ -968,10 +987,10 @@ namespace {
m_of << "}";
}
}
- else if( enm.m_repr != ::HIR::Enum::Repr::Rust || ::std::all_of(enm.m_variants.begin(), enm.m_variants.end(), [](const auto& x){return x.second.is_Unit() || x.second.is_Value();}) )
+ else if( enm.is_value() )
{
MIR_ASSERT(*m_mir_res, e.vals.empty(), "Value-only enum with fields");
- m_of << "{" << e.idx << "}";
+ m_of << "{" << enm.get_value(e.idx) << "}";
}
else
{
@@ -1178,7 +1197,14 @@ namespace {
m_of << "\t{ ";
m_of << "sizeof("; emit_ctype(type); m_of << "),";
m_of << "__alignof__("; emit_ctype(type); m_of << "),";
- // TODO: Drop glue pointer
+ if( type.m_data.is_Borrow() || m_resolve.type_is_copy(sp, type) )
+ {
+ m_of << "noop_drop,";
+ }
+ else
+ {
+ m_of << "(void*)" << Trans_Mangle(::HIR::Path(type.clone(), "#drop_glue")) << ",";
+ }
m_of << "}"; // No newline, added below
for(unsigned int i = 0; i < trait.m_value_indexes.size(); i ++ )
@@ -1288,6 +1314,9 @@ namespace {
switch( stmt.tag() )
{
case ::MIR::Statement::TAGDEAD: throw "";
+ case ::MIR::Statement::TAG_ScopeEnd:
+ m_of << "// " << stmt << "\n";
+ break;
case ::MIR::Statement::TAG_SetDropFlag: {
const auto& e = stmt.as_SetDropFlag();
m_of << "\tdf" << e.idx << " = ";
@@ -1303,7 +1332,7 @@ namespace {
const auto& ty = mir_res.get_lvalue_type(tmp, e.slot);
if( e.flag_idx != ~0u )
- m_of << "if( df" << e.flag_idx << " ) {\n";
+ m_of << "\tif( df" << e.flag_idx << " ) {\n";
switch( e.kind )
{
@@ -1326,7 +1355,7 @@ namespace {
break;
}
if( e.flag_idx != ~0u )
- m_of << "}\n";
+ m_of << "\t}\n";
break; }
case ::MIR::Statement::TAG_Asm: {
const auto& e = stmt.as_Asm();
@@ -1350,6 +1379,7 @@ namespace {
m_of << "\t__asm__ ";
if(is_volatile) m_of << "__volatile__";
// TODO: Convert format string?
+ // TODO: Use a C-specific escaper here.
m_of << "(\"" << (is_intel ? ".syntax intel; " : "") << FmtEscaped(e.tpl) << (is_intel ? ".syntax att; " : "") << "\"";
m_of << ": ";
for(unsigned int i = 0; i < e.outputs.size(); i ++ )
@@ -1498,9 +1528,8 @@ namespace {
(BinOp,
emit_lvalue(e.dst);
m_of << " = ";
- MIR_ASSERT(mir_res, ve.val_l.is_LValue() || ve.val_r.is_LValue(), "");
::HIR::TypeRef tmp;
- const auto& ty = mir_res.get_lvalue_type(tmp, ve.val_l.is_LValue() ? ve.val_l.as_LValue() : ve.val_r.as_LValue());
+ const auto& ty = ve.val_l.is_LValue() ? mir_res.get_lvalue_type(tmp, ve.val_l.as_LValue()) : tmp = mir_res.get_const_type(ve.val_l.as_Constant());
if( ty.m_data.is_Borrow() ) {
m_of << "(slice_cmp("; emit_param(ve.val_l); m_of << ", "; emit_param(ve.val_r); m_of << ")";
switch(ve.op)
@@ -1646,43 +1675,65 @@ namespace {
}
),
(Variant,
- if( m_crate.get_typeitem_by_path(sp, ve.path.m_path).is_Union() )
+ const auto& tyi = m_crate.get_typeitem_by_path(sp, ve.path.m_path);
+ if( tyi.is_Union() )
{
emit_lvalue(e.dst);
+ m_of << ".var_" << ve.index << " = "; emit_param(ve.val);
}
- else
+ else if( const auto* enm_p = tyi.opt_Enum() )
{
MIR_TODO(mir_res, "Construct enum with RValue::Variant");
- emit_lvalue(e.dst); m_of << ".TAG = " << ve.index << ";\n\t";
- emit_lvalue(e.dst); m_of << ".DATA";
+ if( enm_p->is_value() )
+ {
+ emit_lvalue(e.dst); m_of << ".TAG = " << enm_p->get_value(ve.index) << "";
+ }
+ else
+ {
+ emit_lvalue(e.dst); m_of << ".TAG = " << ve.index << ";\n\t";
+ emit_lvalue(e.dst); m_of << ".DATA";
+ m_of << ".var_" << ve.index << " = "; emit_param(ve.val);
+ }
+ }
+ else
+ {
+ BUG(mir_res.sp, "Unexpected type in Variant");
}
- m_of << ".var_" << ve.index << " = "; emit_param(ve.val);
),
(Struct,
- if(ve.variant_idx != ~0u) {
+ if(ve.variant_idx != ~0u)
+ {
::HIR::TypeRef tmp;
const auto& ty = mir_res.get_lvalue_type(tmp, e.dst);
- if( ty.m_data.as_Path().binding.is_Enum() ) {
- auto it = m_enum_repr_cache.find(ty.m_data.as_Path().path.m_data.as_Generic());
- if( it != m_enum_repr_cache.end() )
- {
- if( ve.variant_idx == 0 ) {
- // TODO: Use nonzero_path
- m_of << "memset(&"; emit_lvalue(e.dst); m_of << ", 0, sizeof("; emit_ctype(ty); m_of << "))";
- }
- else if( ve.variant_idx == 1 ) {
- emit_lvalue(e.dst);
- m_of << "._0 = ";
- emit_param(ve.vals[0]);
- }
- else {
- }
- break;
+ const auto* enm_p = ty.m_data.as_Path().binding.as_Enum();
+
+ auto it = m_enum_repr_cache.find(ty.m_data.as_Path().path.m_data.as_Generic());
+ if( it != m_enum_repr_cache.end() )
+ {
+ if( ve.variant_idx == 0 ) {
+ // TODO: Use nonzero_path
+ m_of << "memset(&"; emit_lvalue(e.dst); m_of << ", 0, sizeof("; emit_ctype(ty); m_of << "))";
+ }
+ else if( ve.variant_idx == 1 ) {
+ emit_lvalue(e.dst);
+ m_of << "._0 = ";
+ emit_param(ve.vals[0]);
+ }
+ else {
}
+ break;
+ }
+ else if( enm_p->is_value() )
+ {
+ emit_lvalue(e.dst);
+ m_of << ".TAG = " << enm_p->get_value(ve.variant_idx);
+ assert(ve.vals.size() == 0);
+ }
+ else
+ {
+ emit_lvalue(e.dst);
+ m_of << ".TAG = " << ve.variant_idx;
}
-
- emit_lvalue(e.dst);
- m_of << ".TAG = " << ve.variant_idx;
if(ve.vals.size() > 0)
m_of << ";\n\t";
}
@@ -1736,6 +1787,7 @@ namespace {
const auto& ty = mir_res.get_lvalue_type(tmp, e.val);
MIR_ASSERT(mir_res, ty.m_data.is_Path(), "");
MIR_ASSERT(mir_res, ty.m_data.as_Path().binding.is_Enum(), "");
+ const auto* enm = ty.m_data.as_Path().binding.as_Enum();
auto it = m_enum_repr_cache.find( ty.m_data.as_Path().path.m_data.as_Generic() );
if( it != m_enum_repr_cache.end() )
{
@@ -1745,6 +1797,16 @@ namespace {
m_of << "\telse\n";
m_of << "\t\tgoto bb" << e.targets[0] << ";\n";
}
+ else if( enm->is_value() )
+ {
+ m_of << "\tswitch("; emit_lvalue(e.val); m_of << ".TAG) {\n";
+ for(unsigned int j = 0; j < e.targets.size(); j ++)
+ {
+ m_of << "\t\tcase " << enm->get_value(j) << ": goto bb" << e.targets[j] << ";\n";
+ }
+ m_of << "\t\tdefault: abort();\n";
+ m_of << "\t}\n";
+ }
else
{
m_of << "\tswitch("; emit_lvalue(e.val); m_of << ".TAG) {\n";
@@ -2046,7 +2108,16 @@ namespace {
emit_lvalue(e.ret_val); m_of << ".META = " << s.size() << "";
}
else if( name == "transmute" ) {
- m_of << "memcpy( &"; emit_lvalue(e.ret_val); m_of << ", &"; emit_param(e.args.at(0)); m_of << ", sizeof("; emit_ctype(params.m_types.at(0)); m_of << "))";
+ if( e.args.at(0).is_Constant() )
+ {
+ m_of << "{ "; emit_ctype(params.m_types.at(1), FMT_CB(s, s << "v";)); m_of << " = "; emit_param(e.args.at(0)); m_of << ";";
+ m_of << "memcpy( &"; emit_lvalue(e.ret_val); m_of << ", &v, sizeof("; emit_ctype(params.m_types.at(0)); m_of << ")); ";
+ m_of << "}";
+ }
+ else
+ {
+ m_of << "memcpy( &"; emit_lvalue(e.ret_val); m_of << ", &"; emit_param(e.args.at(0)); m_of << ", sizeof("; emit_ctype(params.m_types.at(0)); m_of << "))";
+ }
}
else if( name == "copy_nonoverlapping" || name == "copy" ) {
if( name == "copy" ) {
@@ -2077,15 +2148,15 @@ namespace {
else if( name == "needs_drop" ) {
// Returns `true` if the actual type given as `T` requires drop glue;
// returns `false` if the actual type provided for `T` implements `Copy`. (Either otherwise)
+ // NOTE: libarena assumes that this returns `true` iff T requires drop glue.
const auto& ty = params.m_types.at(0);
emit_lvalue(e.ret_val);
m_of << " = ";
- if( m_resolve.type_is_copy(Span(), ty) ) {
- m_of << "false";
+ if( m_resolve.type_needs_drop_glue(mir_res.sp, ty) ) {
+ m_of << "true";
}
- // If T: !Copy, return true
else {
- m_of << "true";
+ m_of << "false";
}
}
else if( name == "uninit" ) {
@@ -2299,6 +2370,13 @@ namespace {
else if( name == "fmaf32" || name == "fmaf64" ) {
emit_lvalue(e.ret_val); m_of << " = fma" << (name.back()=='2'?"f":"") << "("; emit_param(e.args.at(0)); m_of << ", "; emit_param(e.args.at(1)); m_of << ", "; emit_param(e.args.at(1)); m_of << ")";
}
+ // --- Volatile Load/Store
+ else if( name == "volatile_load" ) {
+ emit_lvalue(e.ret_val); m_of << " = *(volatile "; emit_ctype(params.m_types.at(0)); m_of << "*)"; emit_param(e.args.at(0));
+ }
+ else if( name == "volatile_store" ) {
+ m_of << "*(volatile "; emit_ctype(params.m_types.at(0)); m_of << "*)"; emit_param(e.args.at(0)); m_of << " = "; emit_param(e.args.at(1));
+ }
// --- Atomics!
// > Single-ordering atomics
else if( name == "atomic_xadd" || name.compare(0, 7+4+1, "atomic_xadd_") == 0 ) {
@@ -2362,9 +2440,20 @@ namespace {
auto fail_ordering = H::get_atomic_ordering(mir_res, name, 7+10+4);
emit_atomic_cxchg(e, "memory_order_seq_cst", fail_ordering, true);
}
- else if( name == "atomic_cxchgweak" || name.compare(0, 7+9+1, "atomic_cxchgweak_") == 0 ) {
- auto ordering = H::get_atomic_ordering(mir_res, name, 7+9+1);
- emit_atomic_cxchg(e, ordering, ordering, true);
+ else if( name == "atomic_cxchgweak" ) {
+ emit_atomic_cxchg(e, "memory_order_seq_cst", "memory_order_seq_cst", true);
+ }
+ else if( name == "atomic_cxchgweak_acq" ) {
+ emit_atomic_cxchg(e, "memory_order_acquire", "memory_order_acquire", true);
+ }
+ else if( name == "atomic_cxchgweak_rel" ) {
+ emit_atomic_cxchg(e, "memory_order_release", "memory_order_relaxed", true);
+ }
+ else if( name == "atomic_cxchgweak_acqrel" ) {
+ emit_atomic_cxchg(e, "memory_order_acq_rel", "memory_order_acquire", true);
+ }
+ else if( name == "atomic_cxchgweak_relaxed" ) {
+ emit_atomic_cxchg(e, "memory_order_relaxed", "memory_order_relaxed", true);
}
else if( name == "atomic_xchg" || name.compare(0, 7+5, "atomic_xchg_") == 0 ) {
auto ordering = H::get_atomic_ordering(mir_res, name, 7+5);
@@ -2419,7 +2508,17 @@ namespace {
make_fcn = "make_sliceptr"; if(0)
case MetadataType::TraitObject:
make_fcn = "make_traitobjptr";
- m_of << "\t" << Trans_Mangle(p) << "( " << make_fcn << "(&"; emit_lvalue(slot); m_of << ", ";
+ m_of << "\t" << Trans_Mangle(p) << "( " << make_fcn << "(";
+ if( slot.is_Deref() )
+ {
+ emit_lvalue(*slot.as_Deref().val);
+ m_of << ".PTR";
+ }
+ else
+ {
+ m_of << "&"; emit_lvalue(slot);
+ }
+ m_of << ", ";
const auto* lvp = &slot;
while(const auto* le = lvp->opt_Field()) lvp = &*le->val;
MIR_ASSERT(*m_mir_res, lvp->is_Deref(), "Access to unized type without a deref - " << *lvp << " (part of " << slot << ")");
@@ -2454,13 +2553,31 @@ namespace {
),
(TraitObject,
MIR_ASSERT(*m_mir_res, unsized_valid, "Dropping TraitObject without a pointer");
- //MIR_ASSERT(*m_mir_res, slot.is_Deref(), "Dropping a TraitObject through a non-Deref");
// Call destructor in vtable
+ const auto* lvp = &slot;
+ while(const auto* le = lvp->opt_Field()) lvp = &*le->val;
+ MIR_ASSERT(*m_mir_res, lvp->is_Deref(), "Access to unsized type without a deref - " << *lvp << " (part of " << slot << ")");
+ m_of << "((VTABLE_HDR*)"; emit_lvalue(*lvp->as_Deref().val); m_of << ".META)->drop(";
+ if( const auto* ve = slot.opt_Deref() )
+ {
+ emit_lvalue(*ve->val); m_of << ".PTR";
+ }
+ else
+ {
+ m_of << "&"; emit_lvalue(slot);
+ }
+ m_of << ");";
),
(Slice,
MIR_ASSERT(*m_mir_res, unsized_valid, "Dropping Slice without a pointer");
- //MIR_ASSERT(*m_mir_res, slot.is_Deref(), "Dropping a slice through a non-Deref");
+ const auto* lvp = &slot;
+ while(const auto* le = lvp->opt_Field()) lvp = &*le->val;
+ MIR_ASSERT(*m_mir_res, lvp->is_Deref(), "Access to unsized type without a deref - " << *lvp << " (part of " << slot << ")");
// Call destructor on all entries
+ m_of << "for(unsigned i = 0; i < "; emit_lvalue(*lvp->as_Deref().val); m_of << ".META; i++) {";
+ m_of << "\t\t";
+ emit_destructor_call(::MIR::LValue::make_Index({ box$(slot.clone()), box$(::MIR::LValue::make_Temporary({~0u})) }), *te.inner, false);
+ m_of << "\n\t}";
)
)
}
@@ -2582,10 +2699,10 @@ namespace {
assign_from_literal([&](){ emit_dst(); m_of << "._0"; }, get_inner_type(e.idx, 0), e.vals[0]);
}
}
- else if( enm.m_repr != ::HIR::Enum::Repr::Rust || ::std::all_of(enm.m_variants.begin(), enm.m_variants.end(), [](const auto& x){return x.second.is_Unit() || x.second.is_Value();}) )
+ else if( enm.is_value() )
{
MIR_ASSERT(*m_mir_res, e.vals.empty(), "Value-only enum with fields");
- emit_dst(); m_of << ".TAG = " << e.idx;
+ emit_dst(); m_of << ".TAG = " << enm.get_value(e.idx);
}
else
{
@@ -2645,7 +2762,11 @@ namespace {
if( ' ' <= v && v < 0x7F && v != '"' && v != '\\' )
m_of << v;
else
- m_of << "\\" << (unsigned int)v;
+ {
+ m_of << "\\" << ((unsigned int)v & 0xFF);
+ if( isdigit( *(&v+1) ) )
+ m_of << "\"\"";
+ }
}
m_of << "\"" << ::std::dec;
m_of << ";\n\t";
@@ -2660,7 +2781,10 @@ namespace {
m_of << "var" << e;
),
(Temporary,
- m_of << "tmp" << e.idx;
+ if( e.idx == ~0u )
+ m_of << "i";
+ else
+ m_of << "tmp" << e.idx;
),
(Argument,
m_of << "arg" << e.idx;
@@ -2713,11 +2837,11 @@ namespace {
::HIR::TypeRef tmp;
const auto& ty = m_mir_res->get_lvalue_type(tmp, val);
auto dst_type = metadata_type(ty);
- if( dst_type != MetadataType:: None )
+ if( dst_type != MetadataType::None )
{
m_of << "(*("; emit_ctype(ty); m_of << "*)";
emit_lvalue(*e.val);
- m_of << ")";
+ m_of << ".PTR)";
}
else
{
@@ -2783,7 +2907,18 @@ namespace {
if( c.v == INT64_MIN )
m_of << "INT64_MIN";
else
+ {
m_of << c.v;
+ switch(c.t)
+ {
+ case ::HIR::CoreType::I64:
+ case ::HIR::CoreType::I128:
+ case ::HIR::CoreType::Isize:
+ m_of << "ll";
+ default:
+ break;
+ }
+ }
),
(Uint,
switch(c.t)
@@ -2800,7 +2935,7 @@ namespace {
case ::HIR::CoreType::U64:
case ::HIR::CoreType::U128:
case ::HIR::CoreType::Usize:
- m_of << ::std::hex << "0x" << c.v << ::std::dec;
+ m_of << ::std::hex << "0x" << c.v << "ull" << ::std::dec;
break;
case ::HIR::CoreType::Char:
assert(0 <= c.v && c.v <= 0x10FFFF);
@@ -2837,7 +2972,7 @@ namespace {
if( ' ' <= v && v < 0x7F && v != '"' && v != '\\' )
m_of << v;
else
- m_of << "\\" << (unsigned int)v;
+ m_of << "\\" << ((unsigned int)v & 0xFF);
}
m_of << "\"" << ::std::dec;
),
@@ -2847,7 +2982,7 @@ namespace {
if( ' ' <= v && v < 0x7F && v != '"' && v != '\\' )
m_of << v;
else
- m_of << "\\" << (unsigned int)v;
+ m_of << "\\" << ((unsigned int)v & 0xFF);
}
m_of << "\", " << ::std::dec << c.size() << ")";
),
diff --git a/src/trans/enumerate.cpp b/src/trans/enumerate.cpp
index 89846344..02116d18 100644
--- a/src/trans/enumerate.cpp
+++ b/src/trans/enumerate.cpp
@@ -515,6 +515,7 @@ namespace {
// Ensure that the data trait's vtable is present
const auto& trait = *te.m_trait.m_trait_ptr;
+ ASSERT_BUG(Span(), ! te.m_trait.m_path.m_path.m_components.empty(), "TODO: Data trait is empty, what can be done?");
auto vtable_ty_spath = te.m_trait.m_path.m_path;
vtable_ty_spath.m_components.back() += "#vtable";
const auto& vtable_ref = m_crate.get_struct_by_path(sp, vtable_ty_spath);
@@ -874,6 +875,8 @@ void Trans_Enumerate_Types(EnumState& state)
for(const auto& v : se.inputs)
H::visit_lvalue(tv,pp,fcn, v.second);
),
+ (ScopeEnd,
+ ),
(Assign,
H::visit_lvalue(tv,pp,fcn, se.dst);
TU_MATCHA( (se.src), (re),
@@ -989,6 +992,7 @@ void Trans_Enumerate_Types(EnumState& state)
tv.m_resolve.expand_associated_types( sp, vtable_params.m_types[idx] );
}
+ tv.visit_type( *ent.first.m_data.as_UfcsKnown().type );
tv.visit_type( ::HIR::TypeRef( ::HIR::GenericPath(vtable_ty_spath, mv$(vtable_params)), &vtable_ref ) );
}
@@ -1150,7 +1154,7 @@ namespace {
::std::vector<::HIR::TypeRef> best_impl_params;
const ::HIR::TraitImpl* best_impl = nullptr;
resolve.find_impl(sp, e.trait.m_path, e.trait.m_params, *e.type, [&](auto impl_ref, auto is_fuzz) {
- DEBUG("Found " << impl_ref);
+ DEBUG("[get_ent_fullpath] Found " << impl_ref);
//ASSERT_BUG(sp, !is_fuzz, "Fuzzy match not allowed here");
if( ! impl_ref.m_data.is_TraitImpl() ) {
DEBUG("Trans impl search found an invalid impl type");
@@ -1201,6 +1205,8 @@ namespace {
else
BUG(sp, "Parameter " << i << " unset");
}
+ if( is_spec )
+ DEBUG("- Specialisable");
return !is_spec;
}
return false;
@@ -1457,6 +1463,8 @@ void Trans_Enumerate_FillFrom_MIR(EnumState& state, const ::MIR::Function& code,
),
(SetDropFlag,
),
+ (ScopeEnd,
+ ),
(Drop,
DEBUG("- DROP " << se.slot);
Trans_Enumerate_FillFrom_MIR_LValue(state, se.slot, pp);
diff --git a/src/trans/main_bindings.hpp b/src/trans/main_bindings.hpp
index 2878cc66..59933863 100644
--- a/src/trans/main_bindings.hpp
+++ b/src/trans/main_bindings.hpp
@@ -23,6 +23,7 @@ struct TransOptions
};
extern TransList Trans_Enumerate_Main(const ::HIR::Crate& crate);
+extern TransList Trans_Enumerate_Test(const ::HIR::Crate& crate);
// NOTE: This also sets the saveout flags
extern TransList Trans_Enumerate_Public(::HIR::Crate& crate);
diff --git a/src/trans/monomorphise.cpp b/src/trans/monomorphise.cpp
index 1688a8f5..3ac2ee09 100644
--- a/src/trans/monomorphise.cpp
+++ b/src/trans/monomorphise.cpp
@@ -150,6 +150,9 @@ namespace {
case ::MIR::Statement::TAG_SetDropFlag:
statements.push_back( ::MIR::Statement( stmt.as_SetDropFlag() ) );
break;
+ case ::MIR::Statement::TAG_ScopeEnd:
+ statements.push_back( ::MIR::Statement( stmt.as_ScopeEnd() ) );
+ break;
case ::MIR::Statement::TAG_Drop: {
const auto& e = stmt.as_Drop();
DEBUG("- DROP " << e.slot);
@@ -249,9 +252,21 @@ namespace {
mv$(rval)
}) );
} break;
- case ::MIR::Statement::TAG_Asm:
- TODO(params.sp, "Monormorphise asm!");
- break;
+ case ::MIR::Statement::TAG_Asm: {
+ const auto& e = stmt.as_Asm();
+ DEBUG("- asm! \"" << e.tpl << "\"");
+ ::std::vector< ::std::pair<::std::string, ::MIR::LValue>> new_out, new_in;
+ new_out.reserve( e.outputs.size() );
+ for(auto& ent : e.outputs)
+ new_out.push_back(::std::make_pair( ent.first, monomorph_LValue(resolve, params, ent.second) ));
+ new_in.reserve( e.inputs.size() );
+ for(auto& ent : e.inputs)
+ new_in.push_back(::std::make_pair( ent.first, monomorph_LValue(resolve, params, ent.second) ));
+
+ statements.push_back( ::MIR::Statement::make_Asm({
+ e.tpl, mv$(new_out), mv$(new_in), e.clobbers, e.flags
+ }) );
+ } break;
}
}
diff --git a/src/trans/target.cpp b/src/trans/target.cpp
new file mode 100644
index 00000000..21555101
--- /dev/null
+++ b/src/trans/target.cpp
@@ -0,0 +1,134 @@
+/*
+ * MRustC - Rust Compiler
+ * - By John Hodge (Mutabah/thePowersGang)
+ *
+ * trans/target.cpp
+ * - Target-specific information
+ */
+#include "target.hpp"
+
+// TODO: Replace with target selection
+#define POINTER_SIZE_BYTES 8
+
+bool Target_GetSizeAndAlignOf(const Span& sp, const ::HIR::TypeRef& ty, size_t& out_size, size_t& out_align)
+{
+    TU_MATCHA( (ty.m_data), (te),
+    (Infer,
+        BUG(sp, "sizeof on _ type");
+        ),
+    (Diverge,
+        out_size = 0;
+        out_align = 0;
+        return true;
+        ),
+    (Primitive,
+        switch(te)
+        {
+        case ::HIR::CoreType::Bool:
+        case ::HIR::CoreType::U8:
+        case ::HIR::CoreType::I8:
+            out_size = 1;
+            out_align = 1;
+            return true;
+        case ::HIR::CoreType::U16:
+        case ::HIR::CoreType::I16:
+            out_size = 2;
+            out_align = 2;
+            return true;
+        case ::HIR::CoreType::U32:
+        case ::HIR::CoreType::I32:
+        case ::HIR::CoreType::Char:
+            out_size = 4;
+            out_align = 4;
+            return true;
+        case ::HIR::CoreType::U64:
+        case ::HIR::CoreType::I64:
+            out_size = 8;
+            out_align = 8;
+            return true;
+        case ::HIR::CoreType::U128:
+        case ::HIR::CoreType::I128:
+            out_size = 16;
+            // TODO: If i128 is emulated, this can be 8
+            out_align = 16;
+            return true;
+        case ::HIR::CoreType::Usize:
+        case ::HIR::CoreType::Isize:
+            out_size = POINTER_SIZE_BYTES;
+            out_align = POINTER_SIZE_BYTES;
+            return true;
+        case ::HIR::CoreType::F32:
+            out_size = 4; out_align = 4;
+            return true;
+        case ::HIR::CoreType::F64:
+            out_size = 8; out_align = 8;
+            return true;
+        case ::HIR::CoreType::Str:
+            BUG(sp, "sizeof on a `str` - unsized");
+        }
+        ),
+    (Path,
+        // TODO: Requires knowing the struct/enum layout
+        return false;
+        ),
+    (Generic,
+        // Unknown - return false
+        return false;
+        ),
+    (TraitObject,
+        BUG(sp, "sizeof on a trait object - unsized");
+        ),
+    (ErasedType,
+        BUG(sp, "sizeof on an erased type - shouldn't exist");
+        ),
+    (Array,
+        size_t size;
+        if( !Target_GetSizeAndAlignOf(sp, *te.inner, size,out_align) )
+            return false;
+        out_size = size * te.size_val;
+        return true;
+        ),
+    (Slice,
+        BUG(sp, "sizeof on a slice - unsized");
+        ),
+    (Tuple,
+        // TODO: Struct reordering. Fields are placed in order, with padding.
+        out_size = 0; out_align = 1;
+        for(const auto& t : te)
+        {
+            size_t size, align;
+            if( !Target_GetSizeAndAlignOf(sp, t, size,align) )
+                return false;
+            if( align > 0 )  out_size = (out_size + align - 1) / align * align;
+            out_size += size;
+            out_align = ::std::max(out_align, align);
+        }
+        out_size = (out_size + out_align - 1) / out_align * out_align;
+        return true;
+        ),
+    (Borrow,
+        // TODO: Thin pointers are POINTER_SIZE_BYTES, fat pointers are twice that
+        ),
+    (Pointer,
+        // TODO: Thin pointers are POINTER_SIZE_BYTES, fat pointers are twice that
+        ),
+    (Function,
+        out_size = out_align = POINTER_SIZE_BYTES;  // function pointers are thin
+        return true;
+        ),
+    (Closure,
+        // TODO.
+        )
+    )
+    return false;
+}
+bool Target_GetSizeOf(const Span& sp, const ::HIR::TypeRef& ty, size_t& out_size)
+{
+    size_t align_unused;
+    return Target_GetSizeAndAlignOf(sp, ty, out_size, align_unused);
+}
+bool Target_GetAlignOf(const Span& sp, const ::HIR::TypeRef& ty, size_t& out_align)
+{
+    size_t size_unused;
+    return Target_GetSizeAndAlignOf(sp, ty, size_unused, out_align);
+}
diff --git a/src/trans/target.hpp b/src/trans/target.hpp
new file mode 100644
index 00000000..1c081b54
--- /dev/null
+++ b/src/trans/target.hpp
@@ -0,0 +1,15 @@
+/*
+ * MRustC - Rust Compiler
+ * - By John Hodge (Mutabah/thePowersGang)
+ *
+ * trans/target.hpp
+ * - Target-specific information
+ */
+#pragma once
+
+#include <cstddef>
+#include <hir/type.hpp>
+
+extern bool Target_GetSizeOf(const Span& sp, const ::HIR::TypeRef& ty, size_t& out_size);
+extern bool Target_GetAlignOf(const Span& sp, const ::HIR::TypeRef& ty, size_t& out_align);
+