Diffstat (limited to 'src')
 src/hir/dump.cpp           |   2
 src/hir/pattern.cpp        |  24
 src/hir_typeck/expr_cs.cpp |  67
 src/mir/check.cpp          |  46
 src/mir/dump.cpp           |  11
 src/mir/from_hir.cpp       |  96
 src/mir/from_hir.hpp       |  37
 src/mir/from_hir_match.cpp |  18
 src/mir/mir_builder.cpp    | 492
 src/trans/codegen_c.cpp    |  96
 src/trans/enumerate.cpp    |  12
 src/trans/mangling.cpp     |  12
 src/trans/mangling.hpp     |   2
13 files changed, 621 insertions, 294 deletions
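Most of the churn below is in MIR lowering: variable and temporary validity tracking (VarState) is reworked to use sparse maps that are merged across split scopes (match arms, if/else) and loops, drop emission becomes state-aware, and the C backend gains a vtable size/align header plus Box/box_free drop glue. As a rough sketch (editor's illustration, not part of the commit), the kind of Rust input that exercises the new merge_state logic is a value moved on only one branch:

    fn main() {
        let s = String::from("hello");
        let flag = std::env::args().count() > 1;
        if flag {
            drop(s);    // `s` is moved on this arm only
        }
        // After the two arms merge, `s` is MaybeMoved: an unconditional
        // end-of-scope drop would be wrong, hence the drop-flag TODOs in
        // mir_builder.cpp below.
    }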
diff --git a/src/hir/dump.cpp b/src/hir/dump.cpp index ea113a00..5be6ab92 100644 --- a/src/hir/dump.cpp +++ b/src/hir/dump.cpp @@ -678,7 +678,7 @@ namespace { private: RepeatLitStr indent() const { - return RepeatLitStr { " ", static_cast<int>(m_indent_level) }; + return RepeatLitStr { " ", static_cast<int>(m_indent_level) }; } void inc_indent() { m_indent_level ++; diff --git a/src/hir/pattern.cpp b/src/hir/pattern.cpp index 74528aeb..f51d7990 100644 --- a/src/hir/pattern.cpp +++ b/src/hir/pattern.cpp @@ -30,17 +30,21 @@ namespace HIR { ) return os; } + ::std::ostream& operator<<(::std::ostream& os, const PatternBinding& x) { + if( x.m_mutable ) + os << "mut "; + switch(x.m_type) + { + case PatternBinding::Type::Move: break; + case PatternBinding::Type::Ref: os << "ref "; break; + case PatternBinding::Type::MutRef: os << "ref mut "; break; + } + os << x.m_name << "/*"<<x.m_slot<<"*/" << " @ "; + return os; + } ::std::ostream& operator<<(::std::ostream& os, const Pattern& x) { if( x.m_binding.is_valid() ) { - if( x.m_binding.m_mutable ) - os << "mut "; - switch(x.m_binding.m_type) - { - case PatternBinding::Type::Move: break; - case PatternBinding::Type::Ref: os << "ref "; break; - case PatternBinding::Type::MutRef: os << "ref mut "; break; - } - os << x.m_binding.m_name << "/*"<<x.m_binding.m_slot<<"*/" << " @ "; + os << x.m_binding; } TU_MATCH(Pattern::Data, (x.m_data), (e), (Any, @@ -126,7 +130,7 @@ namespace HIR { for(const auto& s : e.leading) os << s << ", "; if( e.extra_bind.is_valid() ) { - os << e.extra_bind.m_name << " @ "; + os << e.extra_bind; } os << ".. "; for(const auto& s : e.trailing) diff --git a/src/hir_typeck/expr_cs.cpp b/src/hir_typeck/expr_cs.cpp index b1ee04bc..4c8e97ec 100644 --- a/src/hir_typeck/expr_cs.cpp +++ b/src/hir_typeck/expr_cs.cpp @@ -4070,22 +4070,42 @@ void fix_param_count(const Span& sp, Context& context, const ::HIR::TypeRef& sel } namespace { - void add_coerce_borrow(Context& context, ::HIR::ExprNodeP& node_ptr, const ::HIR::TypeRef& des_borrow_inner, ::std::function<void(::HIR::ExprNodeP& n)> cb) + void add_coerce_borrow(Context& context, ::HIR::ExprNodeP& orig_node_ptr, const ::HIR::TypeRef& des_borrow_inner, ::std::function<void(::HIR::ExprNodeP& n)> cb) { - const auto& src_type = context.m_ivars.get_type(node_ptr->m_res_type); + const auto& src_type = context.m_ivars.get_type(orig_node_ptr->m_res_type); + auto borrow_type = src_type.m_data.as_Borrow().type; // Since this function operates on destructured &-ptrs, the dereferences have to be added behind a borrow - ::HIR::ExprNodeP* node_ptr_ptr = nullptr; + ::HIR::ExprNodeP* node_ptr_ptr = &orig_node_ptr; + + #if 1 + // If the coercion is of a block, apply the mutation to the inner node + while( auto* p = dynamic_cast< ::HIR::ExprNode_Block*>(&**node_ptr_ptr) ) + { + DEBUG("- Moving into block"); + ASSERT_BUG( p->span(), context.m_ivars.types_equal(p->m_res_type, p->m_nodes.back()->m_res_type), + "Block and result mismatch - " << context.m_ivars.fmt_type(p->m_res_type) << " != " << context.m_ivars.fmt_type(p->m_nodes.back()->m_res_type)); + // - Override the the result type to the desired result + p->m_res_type = ::HIR::TypeRef::new_borrow(borrow_type, des_borrow_inner.clone()); + node_ptr_ptr = &p->m_nodes.back(); + } + #endif + auto& node_ptr = *node_ptr_ptr; + // - If the pointed node is a borrow operation, add the dereferences within its value - if( auto* p = dynamic_cast< ::HIR::ExprNode_Borrow*>(&*node_ptr) ) { + if( auto* p = dynamic_cast< ::HIR::ExprNode_Borrow*>(&*node_ptr) ) + { + // Set 
the result of the borrow operation to the output type + node_ptr->m_res_type = ::HIR::TypeRef::new_borrow(borrow_type, des_borrow_inner.clone()); + node_ptr_ptr = &p->m_value; } - // - Otherwise, create a new borrow operation behind which the dereferences ahppen - if( !node_ptr_ptr ) { + // - Otherwise, create a new borrow operation behind which the dereferences happen + else + { DEBUG("- Coercion node isn't a borrow, adding one"); auto span = node_ptr->span(); const auto& src_inner_ty = *src_type.m_data.as_Borrow().inner; - auto borrow_type = src_type.m_data.as_Borrow().type; auto inner_ty_ref = ::HIR::TypeRef::new_borrow(borrow_type, des_borrow_inner.clone()); @@ -4097,11 +4117,6 @@ namespace { // - Set node pointer reference to point into the new borrow op node_ptr_ptr = &dynamic_cast< ::HIR::ExprNode_Borrow&>(*node_ptr).m_value; } - else { - auto borrow_type = context.m_ivars.get_type(node_ptr->m_res_type).m_data.as_Borrow().type; - // Set the result of the borrow operation to the output type - node_ptr->m_res_type = ::HIR::TypeRef::new_borrow(borrow_type, des_borrow_inner.clone()); - } cb(*node_ptr_ptr); @@ -4628,16 +4643,35 @@ namespace { (Borrow, TU_IFLET(::HIR::TypeRef::Data, ty_src.m_data, Borrow, r_e, // If using `&mut T` where `&const T` is expected - insert a reborrow (&*) - // TODO: &move reboorrowing rules? + // TODO: &move reborrowing rules? //if( l_e.type < r_e.type ) { if( l_e.type == ::HIR::BorrowType::Shared && r_e.type == ::HIR::BorrowType::Unique ) { - // Add cast down - auto span = node_ptr->span(); - // > Goes from `ty_src` -> `*ty_src` -> `&`l_e.type` `&ty_src` + // > Goes from `ty_src` -> `*ty_src` -> `&`l_e.type` `*ty_src` const auto& inner_ty = *r_e.inner; auto dst_bt = l_e.type; auto new_type = ::HIR::TypeRef::new_borrow(dst_bt, inner_ty.clone()); + + // If the coercion is of a block, do the reborrow on the last node of the block + // - Cleans up the dumped MIR and prevents needing a reborrow elsewhere. 
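As an editor's sketch (not from the commit) of the source pattern this handles: a block whose tail expression is a `&mut` borrow that must coerce to a shared borrow now has the reborrow applied to the last node of the block, rather than wrapping the whole block:

    fn main() {
        let mut v = vec![1, 2, 3];
        let shared: &Vec<i32> = {
            let exclusive: &mut Vec<i32> = &mut v;
            exclusive    // reborrowed as `&*exclusive` on the block's last node
        };
        println!("{}", shared.len());
    }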
+ #if 1 + ::HIR::ExprNodeP* npp = &node_ptr; + while( auto* p = dynamic_cast< ::HIR::ExprNode_Block*>(&**npp) ) + { + DEBUG("- Propagate to the last node of a _Block"); + ASSERT_BUG( p->span(), context.m_ivars.types_equal(p->m_res_type, p->m_nodes.back()->m_res_type), + "Block and result mismatch - " << context.m_ivars.fmt_type(p->m_res_type) << " != " << context.m_ivars.fmt_type(p->m_nodes.back()->m_res_type)); + ASSERT_BUG( p->span(), context.m_ivars.types_equal(p->m_res_type, ty_src), + "Block and result mismatch - " << context.m_ivars.fmt_type(p->m_res_type) << " != " << context.m_ivars.fmt_type(ty_src) + ); + p->m_res_type = new_type.clone(); + npp = &p->m_nodes.back(); + } + ::HIR::ExprNodeP& node_ptr = *npp; + #endif + + // Add cast down + auto span = node_ptr->span(); // *<inner> DEBUG("- Deref -> " << inner_ty); node_ptr = NEWNODE( inner_ty.clone(), span, _Deref, mv$(node_ptr) ); @@ -4654,7 +4688,6 @@ namespace { else if( l_e.type != r_e.type ) { ERROR(sp, E0000, "Type mismatch between " << ty_dst << " and " << ty_src << " - Borrow classes differ"); } - // - Check for coercions return check_coerce_borrow(context, l_e.type, *l_e.inner, *r_e.inner, node_ptr); ) diff --git a/src/mir/check.cpp b/src/mir/check.cpp index 186e93fb..0c428c64 100644 --- a/src/mir/check.cpp +++ b/src/mir/check.cpp @@ -14,6 +14,7 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path, const ::MIR::Function& fcn, const ::HIR::Function::args_t& args, const ::HIR::TypeRef& ret_type) { + TRACE_FUNCTION_F(path); Span sp; ::MIR::TypeResolve state { sp, resolve, FMT_CB(ss, ss << path;), ret_type, args, fcn }; // Validation rules: @@ -107,6 +108,14 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path { } + bool operator==(const ValStates& x) const { + if( ret_state != x.ret_state ) return false; + if( arguments != x.arguments ) return false; + if( temporaries != x.temporaries ) return false; + if( variables != x.variables ) return false; + return true; + } + bool empty() const { return arguments.empty() && temporaries.empty() && variables.empty(); } @@ -221,6 +230,15 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path }; ::std::vector< ValStates> block_start_states( fcn.blocks.size() ); ::std::vector< ::std::pair<unsigned int, ValStates> > to_visit_blocks; + + auto add_to_visit = [&](auto idx, auto vs) { + for(const auto& b : to_visit_blocks) + if( b.first == idx && b.second == vs) + return ; + if( block_start_states.at(idx) == vs ) + return ; + to_visit_blocks.push_back( ::std::make_pair(idx, mv$(vs)) ); + }; to_visit_blocks.push_back( ::std::make_pair(0, ValStates{ args.size(), fcn.temporaries.size(), fcn.named_variables.size() }) ); while( to_visit_blocks.size() > 0 ) { @@ -265,7 +283,9 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path val_state.ensure_valid(state, se.val); ), (Cast, - val_state.move_val(state, se.val); + // Well.. it's not exactly moved... + val_state.ensure_valid(state, se.val); + //val_state.move_val(state, se.val); ), (BinOp, val_state.move_val(state, se.val_l); @@ -335,21 +355,35 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path ), (Goto, // Push block with the new state - to_visit_blocks.push_back( ::std::make_pair(e, ::std::move(val_state)) ); + add_to_visit( e, mv$(val_state) ); ), (Panic, // What should be done here? 
), (If, // Push blocks - to_visit_blocks.push_back( ::std::make_pair(e.bb0, val_state) ); - to_visit_blocks.push_back( ::std::make_pair(e.bb1, ::std::move(val_state)) ); + val_state.ensure_valid( state, e.cond ); + add_to_visit( e.bb0, val_state ); + add_to_visit( e.bb1, mv$(val_state) ); ), (Switch, - // TODO: Push blocks + val_state.ensure_valid( state, e.val ); + for(const auto& tgt : e.targets) + { + add_to_visit( tgt, val_state ); + } ), (Call, - // TODO: Push blocks (with return valid only in one) + if( e.fcn.is_Value() ) + val_state.ensure_valid( state, e.fcn.as_Value() ); + for(const auto& arg : e.args) + val_state.ensure_valid( state, arg ); + // Push blocks (with return valid only in one) + add_to_visit(e.panic_block, val_state); + + // TODO: If the function returns !, don't follow the ret_block + val_state.mark_validity( state, e.ret_val, true ); + add_to_visit(e.ret_block, mv$(val_state)); ) ) } diff --git a/src/mir/dump.cpp b/src/mir/dump.cpp index 555093f2..d143c2c9 100644 --- a/src/mir/dump.cpp +++ b/src/mir/dump.cpp @@ -165,7 +165,16 @@ namespace { ), (Drop, DEBUG("- DROP " << e.slot); - m_os << "drop(" << FMT_M(e.slot) << ");\n"; + m_os << "drop(" << FMT_M(e.slot); + switch( e.kind ) + { + case ::MIR::eDropKind::SHALLOW: + m_os << " SHALLOW"; + break; + case ::MIR::eDropKind::DEEP: + break; + } + m_os << ");\n"; ) ) } diff --git a/src/mir/from_hir.cpp b/src/mir/from_hir.cpp index e21ca993..b5e42d31 100644 --- a/src/mir/from_hir.cpp +++ b/src/mir/from_hir.cpp @@ -280,12 +280,17 @@ namespace { // These are only refutable if T is [T] bool ty_is_array = false; unsigned int array_size = 0; - m_builder.with_val_type(sp, lval, [&ty_is_array,&array_size](const auto& ty){ + ::HIR::TypeRef inner_type; + m_builder.with_val_type(sp, lval, [&ty_is_array,&array_size,&e,&inner_type](const auto& ty){ if( ty.m_data.is_Array() ) { array_size = ty.m_data.as_Array().size_val; + if( e.extra_bind.is_valid() ) + inner_type = ty.m_data.as_Array().inner->clone(); ty_is_array = true; } else { + if( e.extra_bind.is_valid() ) + inner_type = ty.m_data.as_Slice().inner->clone(); ty_is_array = false; } }); @@ -297,6 +302,10 @@ namespace { unsigned int idx = 0 + i; destructure_from_ex(sp, e.leading[i], ::MIR::LValue::make_Field({ box$(lval.clone()), idx }), allow_refutable ); } + if( e.extra_bind.is_valid() ) + { + TODO(sp, "Destructure array obtaining remainder"); + } for(unsigned int i = 0; i < e.trailing.size(); i ++) { unsigned int idx = array_size - e.trailing.size() + i; @@ -306,12 +315,47 @@ namespace { else { ASSERT_BUG(sp, allow_refutable, "Refutable pattern not expected - " << pat); - // TODO: Acquire the slice size variable. + + // Acquire the slice size variable. + ::MIR::LValue len_lval; + if( e.extra_bind.is_valid() || e.trailing.size() > 0 ) + { + len_lval = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_DstMeta({ lval.clone() })); + } + for(unsigned int i = 0; i < e.leading.size(); i ++) { unsigned int idx = i; destructure_from_ex(sp, e.leading[i], ::MIR::LValue::make_Field({ box$(lval.clone()), idx }), allow_refutable ); } + if( e.extra_bind.is_valid() ) + { + // 1. Obtain remaining length + auto sub_val = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_Constant( e.leading.size() + e.trailing.size() )); + ::MIR::LValue len_val = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_BinOp({ len_lval.clone(), ::MIR::eBinOp::SUB, mv$(sub_val) }) ); + + // 2. 
Obtain pointer to element + ::HIR::BorrowType bt = ::HIR::BorrowType::Owned; + switch(e.extra_bind.m_type) + { + case ::HIR::PatternBinding::Type::Move: + BUG(sp, "By-value pattern binding of a slice"); + throw ""; + case ::HIR::PatternBinding::Type::Ref: + bt = ::HIR::BorrowType::Shared; + break; + case ::HIR::PatternBinding::Type::MutRef: + bt = ::HIR::BorrowType::Unique; + break; + } + ::MIR::LValue ptr_val = m_builder.lvalue_or_temp(sp, + ::HIR::TypeRef::new_pointer( bt, inner_type.clone() ), + ::MIR::RValue::make_Borrow({ 0, bt, ::MIR::LValue::make_Field({ box$(lval.clone()), static_cast<unsigned int>(e.leading.size()) }) }) + ); + + // Construct fat pointer + m_builder.push_stmt_assign( sp, ::MIR::LValue::make_Variable(e.extra_bind.m_slot), ::MIR::RValue::make_MakeDst({ mv$(ptr_val), mv$(len_val) }) ); + } if( e.trailing.size() > 0 ) { TODO(sp, "Destructure slice using SplitSlice with trailing - " << pat); @@ -355,12 +399,13 @@ namespace { } } - // - For the last node, don't bother with a statement scope + // For the last node, specially handle. if( node.m_yields_final ) { auto& subnode = node.m_nodes.back(); const Span& sp = subnode->span(); + auto res_val = m_builder.new_temporary(node.m_res_type); auto stmt_scope = m_builder.new_scope_temp(sp); this->visit_node_ptr(subnode); if( m_builder.has_result() || m_builder.block_active() ) @@ -369,12 +414,11 @@ namespace { ASSERT_BUG(sp, m_builder.has_result(), "Active block but no result yeilded"); // PROBLEM: This can drop the result before we want to use it. - auto res = m_builder.get_result(sp); - m_builder.raise_variables(sp, res); + m_builder.push_stmt_assign(sp, res_val.clone(), m_builder.get_result(sp)); m_builder.terminate_scope(sp, mv$(stmt_scope)); m_builder.terminate_scope( node.span(), mv$(scope) ); - m_builder.set_result( node.span(), mv$(res) ); + m_builder.set_result( node.span(), mv$(res_val) ); } else { @@ -484,6 +528,8 @@ namespace { assert( !m_builder.has_result() ); m_builder.end_block( ::MIR::Terminator::make_Diverge({}) ); } + + // TODO: Store the variable state on a break for restoration at the end of the loop. 
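For context, an editor-added sketch (not part of the patch) of why break and continue need distinct scope termination, which the loop_exit flag below implements:

    fn main() {
        let mut i = 0;
        loop {
            let s = format!("iter {}", i);  // owned temporary scoped to the body
            i += 1;
            if i < 3 {
                continue;   // `s` must be dropped before jumping to the loop head
            }
            println!("last: {}", s);
            break;          // `s` is dropped before leaving the loop, and the
                            // variable states at this exit are recorded
                            // (the loop scope's exit_states)
        }
    }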
} void visit(::HIR::ExprNode_LoopControl& node) override { @@ -501,12 +547,12 @@ namespace { target_block = &*it; } - // TODO: Insert drop of all active scopes within the loop - m_builder.terminate_scope_early( node.span(), target_block->scope ); if( node.m_continue ) { + m_builder.terminate_scope_early( node.span(), target_block->scope, /*loop_exit=*/false ); m_builder.end_block( ::MIR::Terminator::make_Goto(target_block->cur) ); } else { + m_builder.terminate_scope_early( node.span(), target_block->scope, /*loop_exit=*/true ); m_builder.end_block( ::MIR::Terminator::make_Goto(target_block->next) ); } } @@ -543,7 +589,7 @@ namespace { if( m_builder.block_active() ) { auto res = m_builder.get_result(arm.m_code->span()); - m_builder.raise_variables( arm.m_code->span(), res ); + m_builder.raise_variables( arm.m_code->span(), res, scope ); m_builder.set_result(arm.m_code->span(), mv$(res)); m_builder.terminate_scope( node.span(), mv$(tmp_scope) ); @@ -594,14 +640,17 @@ namespace { // 'true' branch { + auto stmt_scope = m_builder.new_scope_temp(node.m_true->span()); m_builder.set_cur_block(true_branch); this->visit_node_ptr(node.m_true); if( m_builder.block_active() || m_builder.has_result() ) { m_builder.push_stmt_assign( node.span(), result_val.clone(), m_builder.get_result(node.m_true->span()) ); + m_builder.terminate_scope(node.span(), mv$(stmt_scope)); m_builder.end_split_arm(node.span(), scope, true); m_builder.end_block( ::MIR::Terminator::make_Goto(next_block) ); } else { + { auto _ = mv$(stmt_scope); } m_builder.end_split_arm(node.span(), scope, false); } } @@ -610,14 +659,17 @@ namespace { m_builder.set_cur_block(false_branch); if( node.m_false ) { + auto stmt_scope = m_builder.new_scope_temp(node.m_false->span()); this->visit_node_ptr(node.m_false); if( m_builder.block_active() ) { m_builder.push_stmt_assign( node.span(), result_val.clone(), m_builder.get_result(node.m_false->span()) ); + m_builder.terminate_scope(node.span(), mv$(stmt_scope)); m_builder.end_block( ::MIR::Terminator::make_Goto(next_block) ); m_builder.end_split_arm(node.span(), scope, true); } else { + { auto _ = mv$(stmt_scope); } m_builder.end_split_arm(node.span(), scope, false); } } @@ -1409,7 +1461,19 @@ namespace { for(auto& arg : node.m_args) { this->visit_node_ptr(arg); - values.push_back( m_builder.get_result_in_lvalue(arg->span(), arg->m_res_type) ); + + if( node.m_args.size() == 1 ) + { + values.push_back( m_builder.get_result_in_lvalue(arg->span(), arg->m_res_type) ); + } + else + { + // NOTE: Have to allocate a new temporary because ordering matters + auto tmp = m_builder.new_temporary(arg->m_res_type); + m_builder.push_stmt_assign( arg->span(), tmp.clone(), m_builder.get_result(arg->span()) ); + values.push_back( mv$(tmp) ); + } + m_builder.moved_lvalue( arg->span(), values.back() ); } @@ -1439,6 +1503,12 @@ namespace { unconditional_diverge = true; } } + else + { + // TODO: Know if the call unconditionally diverges. 
+ if( node.m_cache.m_arg_types.back().m_data.is_Diverge() ) + unconditional_diverge = true; + } // If the call wasn't to an intrinsic, emit it as a path if( m_builder.block_active() ) @@ -1798,7 +1868,11 @@ namespace { assert( !values_set[idx] ); values_set[idx] = true; this->visit_node_ptr(valnode); - values.at(idx) = m_builder.lvalue_or_temp( valnode->span(), valnode->m_res_type, m_builder.get_result(valnode->span()) ); + + // NOTE: Have to allocate a new temporary because ordering matters + auto tmp = m_builder.new_temporary(valnode->m_res_type); + m_builder.push_stmt_assign( valnode->span(), tmp.clone(), m_builder.get_result(valnode->span()) ); + values.at(idx) = mv$(tmp); } for(unsigned int i = 0; i < values.size(); i ++) { diff --git a/src/mir/from_hir.hpp b/src/mir/from_hir.hpp index 7c83b44f..405d4ffd 100644 --- a/src/mir/from_hir.hpp +++ b/src/mir/from_hir.hpp @@ -38,12 +38,33 @@ public: ~ScopeHandle(); }; +// TODO: Replace VarState with a TU +#if 0 +enum class InvalidType { + Uninit, + Moved, + Descoped, +}; +TAGGED_UNION(VarState, Uninit, + // Currently invalid + (Invalid, InvalidType), + // Partially valid (Map of field states, Box is assumed to have one field) + (Partial, ::std::vector<VarState>), + // Optionally valid (integer indicates the drop flag index) + (Optional, unsigned int), + // Fully valid + (Valid, struct {}), + ) +#endif + // TODO: Replace the first three states with just one (and flags for init/moved) enum class VarState { Uninit, // No value assigned yet Moved, // Definitely moved Dropped, // Dropped (out of scope) + // TODO: Store a bitmap of inner states? + // - Needs to handle relatively arbitary patterns. Including moving out of a Box, but not out of Drop types InnerMoved, // The inner has been moved, but the container needs to be dropped //MaybeMovedInner, // Inner possibly has been moved MaybeMoved, // Possibly has been moved @@ -55,11 +76,8 @@ extern ::std::ostream& operator<<(::std::ostream& os, VarState x); struct SplitArm { bool has_early_terminated = false; bool always_early_terminated = false; // Populated on completion - ::std::vector<bool> changed_var_states; // Indexed by binding bumber - ::std::vector<VarState> var_states; - - ::std::vector<bool> changed_tmp_states; - ::std::vector<VarState> tmp_states; + ::std::map<unsigned int, VarState> var_states; + ::std::map<unsigned int, VarState> tmp_states; }; TAGGED_UNION(ScopeType, Variables, @@ -73,6 +91,9 @@ TAGGED_UNION(ScopeType, Variables, ::std::vector<SplitArm> arms; }), (Loop, struct { + ::std::set<unsigned int> changed_vars; + ::std::set<unsigned int> changed_tmps; + ::std::vector<SplitArm> exit_states; }) ); @@ -163,8 +184,8 @@ public: void mark_value_assigned(const Span& sp, const ::MIR::LValue& val); // Moves control of temporaries up to the next scope - void raise_variables(const Span& sp, const ::MIR::LValue& val); - void raise_variables(const Span& sp, const ::MIR::RValue& rval); + void raise_variables(const Span& sp, const ::MIR::LValue& val, const ScopeHandle& scope); + void raise_variables(const Span& sp, const ::MIR::RValue& rval, const ScopeHandle& scope); void set_cur_block(unsigned int new_block); ::MIR::BasicBlockId pause_cur_block(); @@ -179,7 +200,7 @@ public: ScopeHandle new_scope_split(const Span& sp); ScopeHandle new_scope_loop(const Span& sp); void terminate_scope(const Span& sp, ScopeHandle , bool cleanup=true); - void terminate_scope_early(const Span& sp, const ScopeHandle& ); + void terminate_scope_early(const Span& sp, const ScopeHandle& , bool loop_exit=false); void 
end_split_arm(const Span& sp, const ScopeHandle& , bool reachable); void end_split_arm_early(const Span& sp); diff --git a/src/mir/from_hir_match.cpp b/src/mir/from_hir_match.cpp index d3c5a5eb..183d6418 100644 --- a/src/mir/from_hir_match.cpp +++ b/src/mir/from_hir_match.cpp @@ -120,8 +120,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod auto next_block = builder.new_bb_unlinked(); // 1. Stop the current block so we can generate code - auto first_cmp_block = builder.new_bb_unlinked(); - builder.end_block( ::MIR::Terminator::make_Goto(first_cmp_block) ); + auto first_cmp_block = builder.pause_cur_block(); struct H { @@ -242,7 +241,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod t_arm_rules arm_rules; for(unsigned int arm_idx = 0; arm_idx < node.m_arms.size(); arm_idx ++) { - DEBUG("ARM " << arm_idx); + TRACE_FUNCTION_FR("ARM " << arm_idx, "ARM" << arm_idx); /*const*/ auto& arm = node.m_arms[arm_idx]; ArmCode ac; @@ -251,6 +250,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod // - Define variables from the first pattern conv.define_vars_from(node.span(), arm.m_patterns.front()); + auto pat_scope = builder.new_scope_split(node.span()); for( unsigned int pat_idx = 0; pat_idx < arm.m_patterns.size(); pat_idx ++ ) { const auto& pat = arm.m_patterns[pat_idx]; @@ -271,9 +271,11 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod ac.destructures.push_back( builder.new_bb_unlinked() ); builder.set_cur_block( ac.destructures.back() ); conv.destructure_from( arm.m_code->span(), pat, match_val.clone(), true ); + builder.end_split_arm( arm.m_code->span(), pat_scope, /*reachable=*/false ); // HACK: Mark as not reachable, this scope isn't for codegen. builder.pause_cur_block(); // NOTE: Paused block resumed upon successful match } + builder.terminate_scope( arm.m_code->span(), mv$(pat_scope) ); // TODO: If this pattern ignores fields with Drop impls, this will lead to leaks. // - Ideally, this would trigger a drop of whatever wasn't already taken by the pattern. @@ -292,10 +294,13 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod ac.cond_start = builder.new_bb_unlinked(); builder.set_cur_block( ac.cond_start ); - // TODO: Temp scope. + auto tmp_scope = builder.new_scope_temp(arm.m_cond->span()); conv.visit_node_ptr( arm.m_cond ); ac.cond_lval = builder.get_result_in_lvalue(arm.m_cond->span(), ::HIR::TypeRef(::HIR::CoreType::Bool)); + // NOTE: Terminating the scope slightly early is safe, because the resulting boolean temp isn't invalidated. + builder.terminate_scope( arm.m_code->span(), mv$(tmp_scope) ); ac.cond_end = builder.pause_cur_block(); + // NOTE: Paused so that later code (which knows what the false branch will be) can end it correctly // TODO: What to do with contidionals in the fast model? @@ -309,10 +314,12 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod // Code DEBUG("-- Body Code"); + ac.code = builder.new_bb_unlinked(); auto tmp_scope = builder.new_scope_temp(arm.m_code->span()); builder.set_cur_block( ac.code ); conv.visit_node_ptr( arm.m_code ); + if( !builder.block_active() && !builder.has_result() ) { DEBUG("Arm diverged"); // Nothing need be done, as the block diverged. 
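For context (editor's illustration, not from the commit), the per-pattern split scope and the guard temporary scope added above correspond to matches like:

    fn main() {
        let opt = Some(String::from("hi"));
        let msg = match opt {
            // The binding moves out of `opt` only if this arm is taken,
            // hence the split scope per pattern; the guard expression
            // `s.len() > 1` is evaluated inside its own temporary scope.
            Some(s) if s.len() > 1 => s,
            _ => String::new(),
        };
        println!("{}", msg);
    }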
@@ -325,7 +332,6 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod DEBUG("Arm result"); // - Set result auto res = builder.get_result(arm.m_code->span()); - //builder.raise_variables( arm.m_code->span(), res ); builder.push_stmt_assign( arm.m_code->span(), result_val.clone(), mv$(res) ); // - Drop all non-moved values from this scope builder.terminate_scope( arm.m_code->span(), mv$(tmp_scope) ); @@ -377,6 +383,8 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod DEBUG("> (" << arm_rule.arm_idx << ", " << arm_rule.pat_idx << ") - " << arm_rule.m_rules); } + // TODO: Don't generate inner code until decisions are generated (keeps MIR flow nice) + // TODO: Detect if a rule is ordering-dependent. In this case we currently have to fall back on the simple match code // - A way would be to search for `_` rules with non _ rules following. Would false-positive in some cases, but shouldn't false negative // TODO: Merge equal rulesets if there's one with no condition. diff --git a/src/mir/mir_builder.cpp b/src/mir/mir_builder.cpp index cd7ddbcd..17cf79ec 100644 --- a/src/mir/mir_builder.cpp +++ b/src/mir/mir_builder.cpp @@ -263,6 +263,7 @@ void MirBuilder::push_stmt_drop(const Span& sp, ::MIR::LValue val) return ; } + DEBUG("DROP " << val); m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Drop({ ::MIR::eDropKind::DEEP, mv$(val) }) ); } void MirBuilder::push_stmt_drop_shallow(const Span& sp, ::MIR::LValue val) @@ -276,6 +277,7 @@ void MirBuilder::push_stmt_drop_shallow(const Span& sp, ::MIR::LValue val) // return ; //} + DEBUG("DROP shallow " << val); m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Drop({ ::MIR::eDropKind::SHALLOW, mv$(val) }) ); } @@ -335,16 +337,22 @@ void MirBuilder::mark_value_assigned(const Span& sp, const ::MIR::LValue& dst) ) } -void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val) +void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val, const ScopeHandle& scope) { TRACE_FUNCTION_F(val); TU_MATCH_DEF(::MIR::LValue, (val), (e), ( ), + // TODO: This may not be correct, because it can change the drop points and ordering + // HACK: Working around cases where values are dropped while the result is not yet used. (Deref, - // TODO: This may not be correct, because it can change the drop points and ordering - // HACK: Working around cases where values are dropped while the result is not yet used. - raise_variables(sp, *e.val); + raise_variables(sp, *e.val, scope); + ), + (Field, + raise_variables(sp, *e.val, scope); + ), + (Downcast, + raise_variables(sp, *e.val, scope); ), // Actual value types (Variable, @@ -359,10 +367,13 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val) if( tmp_it != e.vars.end() ) { e.vars.erase( tmp_it ); - DEBUG("Move variable " << idx << " from " << *scope_it); + DEBUG("Raise variable " << idx << " from " << *scope_it); break ; } ) + // If the variable was defined above the desired scope (i.e. 
this didn't find it), return + if( *scope_it == scope.idx ) + return ; ++scope_it; } if( scope_it == m_scope_stack.rend() ) @@ -398,10 +409,14 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val) if( tmp_it != e.temporaries.end() ) { e.temporaries.erase( tmp_it ); - DEBUG("Move temporary " << idx << " from " << *scope_it); + DEBUG("Raise temporary " << idx << " from " << *scope_it); break ; } ) + + // If the temporary was defined above the desired scope (i.e. this didn't find it), return + if( *scope_it == scope.idx ) + return ; ++scope_it; } if( scope_it == m_scope_stack.rend() ) @@ -427,55 +442,55 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val) ) ) } -void MirBuilder::raise_variables(const Span& sp, const ::MIR::RValue& rval) +void MirBuilder::raise_variables(const Span& sp, const ::MIR::RValue& rval, const ScopeHandle& scope) { TU_MATCHA( (rval), (e), (Use, - this->raise_variables(sp, e); + this->raise_variables(sp, e, scope); ), (Constant, ), (SizedArray, - this->raise_variables(sp, e.val); + this->raise_variables(sp, e.val, scope); ), (Borrow, // TODO: Wait, is this valid? - this->raise_variables(sp, e.val); + this->raise_variables(sp, e.val, scope); ), (Cast, - this->raise_variables(sp, e.val); + this->raise_variables(sp, e.val, scope); ), (BinOp, - this->raise_variables(sp, e.val_l); - this->raise_variables(sp, e.val_r); + this->raise_variables(sp, e.val_l, scope); + this->raise_variables(sp, e.val_r, scope); ), (UniOp, - this->raise_variables(sp, e.val); + this->raise_variables(sp, e.val, scope); ), (DstMeta, - this->raise_variables(sp, e.val); + this->raise_variables(sp, e.val, scope); ), (DstPtr, - this->raise_variables(sp, e.val); + this->raise_variables(sp, e.val, scope); ), (MakeDst, - this->raise_variables(sp, e.ptr_val); - this->raise_variables(sp, e.meta_val); + this->raise_variables(sp, e.ptr_val, scope); + this->raise_variables(sp, e.meta_val, scope); ), (Tuple, for(const auto& val : e.vals) - this->raise_variables(sp, val); + this->raise_variables(sp, val, scope); ), (Array, for(const auto& val : e.vals) - this->raise_variables(sp, val); + this->raise_variables(sp, val, scope); ), (Variant, - this->raise_variables(sp, e.val); + this->raise_variables(sp, e.val, scope); ), (Struct, for(const auto& val : e.vals) - this->raise_variables(sp, val); + this->raise_variables(sp, val, scope); ) ) } @@ -561,7 +576,7 @@ ScopeHandle MirBuilder::new_scope_loop(const Span& sp) } void MirBuilder::terminate_scope(const Span& sp, ScopeHandle scope, bool emit_cleanup/*=true*/) { - DEBUG("DONE scope " << scope.idx << " - " << (emit_cleanup ? "CLEANUP" : "NO CLEANUP")); + TRACE_FUNCTION_F("DONE scope " << scope.idx << " - " << (emit_cleanup ? "CLEANUP" : "NO CLEANUP")); // 1. Check that this is the current scope (at the top of the stack) if( m_scope_stack.empty() || m_scope_stack.back() != scope.idx ) { @@ -586,9 +601,9 @@ void MirBuilder::terminate_scope(const Span& sp, ScopeHandle scope, bool emit_cl complete_scope(scope_def); } -void MirBuilder::terminate_scope_early(const Span& sp, const ScopeHandle& scope) +void MirBuilder::terminate_scope_early(const Span& sp, const ScopeHandle& scope, bool loop_exit/*=false*/) { - DEBUG("EARLY scope " << scope.idx); + TRACE_FUNCTION_F("EARLY scope " << scope.idx); // 1. 
Ensure that this block is in the stack auto it = ::std::find( m_scope_stack.begin(), m_scope_stack.end(), scope.idx ); @@ -603,6 +618,25 @@ void MirBuilder::terminate_scope_early(const Span& sp, const ScopeHandle& scope) auto idx = m_scope_stack[i]; auto& scope_def = m_scopes.at( idx ); + if( idx == scope.idx ) + { + // If this is exiting a loop, save the state so the variable state after the loop is known. + if( loop_exit && scope_def.data.is_Loop() ) + { + auto& e = scope_def.data.as_Loop(); + SplitArm sa; + for(const auto& i : e.changed_vars) + { + sa.var_states.insert( ::std::make_pair(i, get_variable_state(sp, i)) ); + } + for(const auto& i : e.changed_tmps) + { + sa.tmp_states.insert( ::std::make_pair(i, get_temp_state(sp, i)) ); + } + e.exit_states.push_back( mv$(sa) ); + } + } + // If a conditional block is hit, prevent full termination of the rest if( scope_def.data.is_Split() || scope_def.data.is_Loop() ) is_conditional = true; @@ -634,34 +668,40 @@ void MirBuilder::end_split_arm(const Span& sp, const ScopeHandle& handle, bool r auto& sd_split = sd.data.as_Split(); ASSERT_BUG(sp, !sd_split.arms.empty(), ""); + TRACE_FUNCTION_F("end split scope " << handle.idx << " arm " << (sd_split.arms.size()-1)); + sd_split.arms.back().always_early_terminated = /*sd_split.arms.back().has_early_terminated &&*/ !reachable; // HACK: If this arm's end is reachable, convert InnerMoved (shallow drop) variable states to Moved // - I'm not 100% sure this is the correct place for calling drop. + #if 1 if( reachable ) { - auto& vss = sd_split.arms.back().var_states; - for(unsigned int i = 0; i < vss.size(); i ++ ) + for(auto& vse : sd_split.arms.back().var_states) { - auto& vs = vss[i]; + //auto i = vse.first; + auto& vs = vse.second; if( vs == VarState::InnerMoved ) { + // TODO: Refactor InnerMoved to handle partial moves via Box // Emit the shallow drop - push_stmt_drop_shallow( sp, ::MIR::LValue::make_Variable(i) ); + //push_stmt_drop_shallow( sp, ::MIR::LValue::make_Variable(i) ); vs = VarState::Moved; } } } + #endif sd_split.arms.push_back( {} ); } void MirBuilder::end_split_arm_early(const Span& sp) { + TRACE_FUNCTION_F(""); // Terminate all scopes until a split is found. while( ! m_scope_stack.empty() && ! 
(m_scopes.at( m_scope_stack.back() ).data.is_Split() || m_scopes.at( m_scope_stack.back() ).data.is_Loop()) ) { auto& scope_def = m_scopes[m_scope_stack.back()]; // Fully drop the scope - DEBUG("Complete scope " << m_scope_stack.size()-1); + DEBUG("Complete scope " << m_scope_stack.back()); drop_scope_values(scope_def); m_scope_stack.pop_back(); complete_scope(scope_def); @@ -669,20 +709,117 @@ void MirBuilder::end_split_arm_early(const Span& sp) if( !m_scope_stack.empty() && m_scopes.at( m_scope_stack.back() ).data.is_Split() ) { + DEBUG("Early terminate split scope " << m_scope_stack.back()); auto& sd = m_scopes[ m_scope_stack.back() ]; auto& sd_split = sd.data.as_Split(); sd_split.arms.back().has_early_terminated = true; - const auto& vss = sd_split.arms.back().var_states; - for(unsigned int i = 0; i < vss.size(); i ++ ) + for(const auto& vse : sd_split.arms.back().var_states) { - auto& vs = vss[i]; + auto i = vse.first; + const auto& vs = vse.second; if( vs == VarState::InnerMoved ) { // Emit the shallow drop push_stmt_drop_shallow( sp, ::MIR::LValue::make_Variable(i) ); - //vs = VarState::Dropped; + // - Don't update the state, because this drop isn't the end-of-scope drop + } + } + } +} +namespace { + static VarState merge_state(const Span& sp, VarState new_state, VarState old_state) + { + switch(old_state) + { + case VarState::Uninit: + switch( new_state ) + { + case VarState::Uninit: + //BUG(sp, "Variable state changed from Uninit to Uninit (wut?)"); + return VarState::Uninit; + case VarState::Init: + // TODO: MaybeInit? + return VarState::MaybeMoved; + case VarState::MaybeMoved: + return VarState::MaybeMoved; + case VarState::Moved: + return VarState::Uninit; + case VarState::InnerMoved: + TODO(sp, "Handle InnerMoved in Split scope (wa Uninit)"); + break; + case VarState::Dropped: + BUG(sp, "Dropped value in arm"); + break; + } + BUG(sp, "Override from Uninit to " << new_state); + break; + case VarState::Init: + switch( new_state ) + { + case VarState::Uninit: + // TODO: MaybeInit? + return VarState::MaybeMoved; + case VarState::Init: + return VarState::Init; + case VarState::MaybeMoved: + return VarState::MaybeMoved; + case VarState::Moved: + return VarState::MaybeMoved; + case VarState::InnerMoved: + TODO(sp, "Handle InnerMoved in Split scope (was Init)"); + break; + case VarState::Dropped: + BUG(sp, "Dropped value in arm"); + break; + } + break; + case VarState::InnerMoved: + // Need to tag for conditional shallow drop? Or just do that at the end of the split? + // - End of the split means that the only optional state is outer drop. + switch( new_state ) + { + case VarState::Uninit: + TODO(sp, "Handle InnerMoved in Split scope (new_states) - Now Uninit"); + case VarState::Init: + TODO(sp, "Handle InnerMoved in Split scope (new_states) - Now Init"); + case VarState::MaybeMoved: + TODO(sp, "Handle InnerMoved in Split scope (new_states) - Now MaybeMoved"); + case VarState::Moved: + TODO(sp, "Handle InnerMoved in Split scope (new_states) - Now Moved"); + case VarState::InnerMoved: + return VarState::InnerMoved; + case VarState::Dropped: + BUG(sp, "Dropped value in arm"); + } + break; + case VarState::MaybeMoved: + // Already optional, don't change + return VarState::MaybeMoved; + case VarState::Moved: + switch( new_state ) + { + case VarState::Uninit: + return VarState::Moved; + case VarState::Init: + // Wut? Reinited? 
+ return VarState::MaybeMoved; + case VarState::MaybeMoved: + return VarState::MaybeMoved; + case VarState::Moved: + return VarState::Moved; + case VarState::InnerMoved: + TODO(sp, "Handle InnerMoved in Split scope (was Moved)"); + break; + case VarState::Dropped: + BUG(sp, "Dropped value in arm"); + break; } + break; + case VarState::Dropped: + TODO(sp, "How can an arm drop a value?"); + break; } + BUG(sp, "Unhandled combination"); } } void MirBuilder::complete_scope(ScopeDef& sd) @@ -702,176 +839,100 @@ void MirBuilder::complete_scope(ScopeDef& sd) (Split, ) ) - - // No macro for better debug output. - if( sd.data.is_Split() ) + + struct H { - auto& e = sd.data.as_Split(); - - assert( e.arms.size() > 1 ); - TRACE_FUNCTION_F("Split - " << (e.arms.size() - 1) << " arms"); - e.arms.pop_back(); - - // Merge all arms and apply upwards - size_t var_count = 0; - size_t tmp_count = 0; - for(const auto& arm : e.arms) - { - var_count = ::std::max(var_count, arm.var_states.size()); - tmp_count = ::std::max(tmp_count, arm.tmp_states.size()); - } - - struct StateMerger + static void apply_split_arms(MirBuilder& self, const Span& sp, ::std::vector<SplitArm>& arms) { - ::std::vector<bool> m_changed; - ::std::vector<VarState> m_new_states; - - StateMerger(size_t var_count): - m_changed(var_count), - m_new_states(var_count) - { - } - - void merge_arm_state(const Span& sp, unsigned int i, bool has_changed, VarState new_state) + // 1. Make a bitmap of changed states in all arms + // 2. Build up the final composite state of the first arm + ::std::map<unsigned int, VarState> new_var_states; + ::std::map<unsigned int, VarState> new_tmp_states; + const SplitArm* first_arm = nullptr; + for(const auto& arm : arms) { - assert(i < this->m_new_states.size()); - assert(i < this->m_changed.size()); - // If there is an existing chnge to the states. - if( this->m_changed[i] ) - { - DEBUG(i << " (" << this->m_new_states[i] << "," << new_state << ")"); - switch(m_new_states[i]) - { - case VarState::Uninit: - BUG(sp, "Override to Uninit"); - break; - case VarState::Init: - if( has_changed ) { - switch( new_state ) - { - case VarState::Uninit: - BUG(sp, "Override to Uninit"); - break; - case VarState::Init: - // No change - break; - case VarState::MaybeMoved: - m_new_states[i] = VarState::MaybeMoved; - break; - case VarState::Moved: - m_new_states[i] = VarState::MaybeMoved; - break; - case VarState::InnerMoved: - TODO(sp, "Handle InnerMoved in Split scope (Init:arm.var_states)"); - break; - case VarState::Dropped: - BUG(sp, "Dropped value in arm"); - break; - } - } - else { - m_new_states[i] = VarState::MaybeMoved; // MaybeInit? - } - break; - case VarState::InnerMoved: - // Need to tag for conditional shallow drop? Or just do that at the end of the split? - // - End of the split means that the only optional state is outer drop. - TODO(sp, "Handle InnerMoved in Split scope (new_states) - " << i /*<< " " << m_output.named_variables[i]*/); - break; - case VarState::MaybeMoved: - // Already optional, don't change - break; - case VarState::Moved: - if( has_changed ) { - switch( new_state ) - { - case VarState::Uninit: - // Wut? - break; - case VarState::Init: - // Wut? Reinited? 
- m_new_states[i] = VarState::MaybeMoved; // This arm didn't touch it - break; - case VarState::MaybeMoved: - m_new_states[i] = VarState::MaybeMoved; - break; - case VarState::Moved: - // No change - break; - case VarState::InnerMoved: - TODO(sp, "Handle InnerMoved in Split scope (Moved:arm.var_states)"); - break; - case VarState::Dropped: - BUG(sp, "Dropped value in arm"); - break; - } - } - else { - m_new_states[i] = VarState::MaybeMoved; // This arm didn't touch it - // TODO: If the original state was Uninit, this could be updated to Uninit? - } - break; - case VarState::Dropped: - TODO(sp, "How can an arm drop a value?"); - break; - } - } - else if( has_changed ) + if( arm.always_early_terminated ) + continue ; + for(const auto& vse : arm.var_states) { - DEBUG(i << " (_,"<<new_state<<")"); - m_changed[i] = true; - m_new_states[i] = new_state; + auto i = vse.first; + if( new_var_states.count(i) == 0 ) + new_var_states.insert( ::std::make_pair(i, vse.second) ); } - else + for(const auto& vse : arm.tmp_states) { - // No change in any seen arm + auto i = vse.first; + if( new_tmp_states.count(i) == 0 ) + new_tmp_states.insert( ::std::make_pair(i, vse.second) ); } + if( !first_arm ) + first_arm = &arm; } - }; - StateMerger sm_var { var_count }; - StateMerger sm_tmp { tmp_count }; - for(const auto& arm : e.arms) - { - DEBUG("><"); - if( arm.always_early_terminated ) - continue ; - assert( arm.changed_var_states.size() == arm.var_states.size() ); - for(unsigned int i = 0; i < arm.var_states.size(); i ++ ) + if( !first_arm ) { - sm_var.merge_arm_state(sd.span, i, arm.changed_var_states[i], arm.var_states[i]); + DEBUG("No arms yeilded"); + return ; } - DEBUG(">TMP<"); - assert( arm.changed_tmp_states.size() == arm.tmp_states.size() ); - for(unsigned int i = 0; i < arm.tmp_states.size(); i ++ ) + // 3. Compare the rest of the arms + for(const auto& arm : arms) { - sm_tmp.merge_arm_state(sd.span, i, arm.changed_tmp_states[i], arm.tmp_states[i]); + if( arm.always_early_terminated ) + continue ; + DEBUG("><"); + for(auto& se : new_var_states) + { + auto i = se.first; + DEBUG("- VAR" << i); + auto new_state = (arm.var_states.count(i) != 0 ? arm.var_states.at(i) : self.get_variable_state(sp, i)); + se.second = merge_state(sp, new_state, se.second); + } + for(auto& se : new_tmp_states) + { + auto i = se.first; + DEBUG("- TMP" << i); + auto new_state = (arm.tmp_states.count(i) != 0 ? arm.tmp_states.at(i) : self.get_temp_state(sp, i)); + se.second = merge_state(sp, new_state, se.second); + } } - } - for(unsigned int i = 0; i < var_count; i ++ ) - { - if( sm_var.m_changed[i] ) + // 4. 
Apply changes + for(const auto& se : new_var_states) { - // - NOTE: This scope should be off the stack now, so this call will get the original state - auto old_state = get_variable_state(sd.span, i); - auto new_state = sm_var.m_new_states[i]; - DEBUG("var" << i << " old_state = " << old_state << ", new_state = " << new_state); - set_variable_state(sd.span, i, new_state); + auto i = se.first; + auto new_state = se.second; + DEBUG("var" << i << " old_state = " << self.get_variable_state(sp, i) << ", new_state = " << new_state); + self.set_variable_state(sp, i, new_state); } - } - for(unsigned int i = 0; i < tmp_count; i ++ ) - { - if( sm_tmp.m_changed[i] ) + for(const auto& se : new_tmp_states) { - // - NOTE: This scope should be off the stack now, so this call will get the original state - auto old_state = get_temp_state(sd.span, i); - auto new_state = sm_tmp.m_new_states[i]; - DEBUG("tmp" << i << " old_state = " << old_state << ", new_state = " << new_state); - set_temp_state(sd.span, i, new_state); + auto i = se.first; + auto new_state = se.second; + DEBUG("tmp" << i << " old_state = " << self.get_temp_state(sp, i) << ", new_state = " << new_state); + self.set_temp_state(sp, i, new_state); } } + }; + + // No macro for better debug output. + if( sd.data.is_Loop() ) + { + auto& e = sd.data.as_Loop(); + TRACE_FUNCTION_F("Loop - " << e.exit_states.size() << " breaks"); + + // Merge all exit states and apply to output + H::apply_split_arms(*this, sd.span, e.exit_states); + } + else if( sd.data.is_Split() ) + { + auto& e = sd.data.as_Split(); + + assert( e.arms.size() > 1 ); + TRACE_FUNCTION_F("Split - " << (e.arms.size() - 1) << " arms"); + e.arms.pop_back(); + + H::apply_split_arms(*this, sd.span, e.arms); } } @@ -1075,10 +1136,10 @@ VarState MirBuilder::get_variable_state(const Span& sp, unsigned int idx) const ), (Split, const auto& cur_arm = e.arms.back(); - if( idx < cur_arm.changed_var_states.size() && cur_arm.changed_var_states[idx] ) + auto it = cur_arm.var_states.find(idx); + if( it != cur_arm.var_states.end() ) { - assert( idx < cur_arm.var_states.size() ); - return cur_arm.var_states[idx]; + return it->second; } ) ) @@ -1092,27 +1153,29 @@ void MirBuilder::set_variable_state(const Span& sp, unsigned int idx, VarState s for( auto scope_idx : ::reverse(m_scope_stack) ) { auto& scope_def = m_scopes.at(scope_idx); - TU_MATCH_DEF( ScopeType, (scope_def.data), (e), - ( - ), - (Variables, + if( scope_def.data.is_Variables() ) + { + const auto& e = scope_def.data.as_Variables(); auto it = ::std::find(e.vars.begin(), e.vars.end(), idx); if( it != e.vars.end() ) { break ; } - ), - (Split, + } + else if( scope_def.data.is_Split() ) + { + auto& e = scope_def.data.as_Split(); auto& cur_arm = e.arms.back(); - if( idx >= cur_arm.changed_var_states.size() ) { - cur_arm.changed_var_states.resize( idx + 1 ); - cur_arm.var_states.resize( idx + 1 ); - } - assert( idx < cur_arm.var_states.size() ); - cur_arm.changed_var_states[idx] = true; cur_arm.var_states[idx] = state; return ; - ) - ) + } + else if( scope_def.data.is_Loop() ) + { + auto& e = scope_def.data.as_Loop(); + e.changed_vars.insert( idx ); + } + else + { + } } ASSERT_BUG(sp, idx < m_variable_states.size(), "Variable " << idx << " out of range for state table"); @@ -1135,10 +1198,10 @@ VarState MirBuilder::get_temp_state(const Span& sp, unsigned int idx) const { const auto& e = scope_def.data.as_Split(); const auto& cur_arm = e.arms.back(); - if( idx < cur_arm.changed_tmp_states.size() && cur_arm.changed_tmp_states[idx] ) + auto it = 
cur_arm.tmp_states.find(idx); + if( it != cur_arm.tmp_states.end() ) { - assert( idx < cur_arm.tmp_states.size() ); - return cur_arm.tmp_states[idx]; + return it->second; } } } @@ -1163,15 +1226,17 @@ void MirBuilder::set_temp_state(const Span& sp, unsigned int idx, VarState state { auto& e = scope_def.data.as_Split(); auto& cur_arm = e.arms.back(); - if( idx >= cur_arm.changed_tmp_states.size() ) { - cur_arm.changed_tmp_states.resize( idx + 1 ); - cur_arm.tmp_states.resize( idx + 1 ); - } - assert( idx < cur_arm.tmp_states.size() ); - cur_arm.changed_tmp_states[idx] = true; cur_arm.tmp_states[idx] = state; return ; } + else if( scope_def.data.is_Loop() ) + { + auto& e = scope_def.data.as_Loop(); + e.changed_tmps.insert( idx ); + } + else + { + } } ASSERT_BUG(sp, idx < m_temporary_states.size(), "Temporary " << idx << " out of range for state table"); @@ -1197,11 +1262,11 @@ void MirBuilder::drop_scope_values(const ScopeDef& sd) break; case VarState::Init: push_stmt_drop( sd.span, ::MIR::LValue::make_Temporary({ tmp_idx }) ); - set_temp_state(sd.span, tmp_idx, VarState::Dropped); + //set_temp_state(sd.span, tmp_idx, VarState::Dropped); break; case VarState::InnerMoved: push_stmt_drop_shallow( sd.span, ::MIR::LValue::make_Temporary({ tmp_idx }) ); - set_temp_state(sd.span, tmp_idx, VarState::Dropped); + //set_temp_state(sd.span, tmp_idx, VarState::Dropped); break; case VarState::MaybeMoved: //BUG(sd.span, "Optionally moved temporary? - " << tmp_idx); @@ -1223,10 +1288,11 @@ void MirBuilder::drop_scope_values(const ScopeDef& sd) break; case VarState::InnerMoved: push_stmt_drop_shallow( sd.span, ::MIR::LValue::make_Variable(var_idx) ); + //set_variable_state(sd.span, var_idx, VarState::Dropped); break; case VarState::MaybeMoved: - //TODO(sd.span, "Include drop flags"); // TODO: Drop flags + //push_stmt_drop_opt(sd.span, ::MIR::LValue::make_Variable(var_idx), drop_flag_idx); break; } } diff --git a/src/trans/codegen_c.cpp b/src/trans/codegen_c.cpp index 37acb33b..8836af54 100644 --- a/src/trans/codegen_c.cpp +++ b/src/trans/codegen_c.cpp @@ -50,6 +50,7 @@ namespace { << "typedef struct { } tTYPEID;\n" << "typedef struct { void* PTR; size_t META; } SLICE_PTR;\n" << "typedef struct { void* PTR; void* META; } TRAITOBJ_PTR;\n" + << "typedef struct { size_t size; size_t align; } VTABLE_HDR;\n" << "\n" << "extern void _Unwind_Resume(void);\n" << "\n" @@ -134,7 +135,17 @@ namespace { emit_ctype( monomorph(ty), inner ); } }; + m_of << "// struct " << p << "\n"; m_of << "struct s_" << Trans_Mangle(p) << " {\n"; + + // HACK: For vtables, insert the alignment and size at the start + { + const auto& lc = p.m_path.m_components.back(); + if( lc.size() > 7 && ::std::strcmp(lc.c_str() + lc.size() - 7, "#vtable") == 0 ) { + m_of << "\tVTABLE_HDR hdr;\n"; + } + } + TU_MATCHA( (item.m_data), (e), (Unit, ), @@ -190,14 +201,32 @@ namespace { if( item.m_markings.has_drop_impl ) { m_of << "tUNIT " << Trans_Mangle( ::HIR::Path(struct_ty.clone(), m_resolve.m_lang_Drop, "drop") ) << "(struct s_" << Trans_Mangle(p) << "*rv);\n"; } - + else if( const auto* ity = m_resolve.is_type_owned_box(struct_ty) ) + { + ::HIR::TypeRef inner_ptr = ::HIR::TypeRef::new_pointer( ::HIR::BorrowType::Unique, ity->clone() ); + ::HIR::GenericPath box_free { m_crate.get_lang_item_path(sp, "box_free"), { ity->clone() } }; + m_of << "tUNIT " << Trans_Mangle(box_free) << "("; emit_ctype(inner_ptr, FMT_CB(ss, ss << "tmp0"; )); m_of << ");\n"; + } + m_of << "void " << Trans_Mangle(drop_glue_path) << "(struct s_" << Trans_Mangle(p) << "* rv) {\n"; 
// If this type has an impl of Drop, call that impl if( item.m_markings.has_drop_impl ) { m_of << "\t" << Trans_Mangle( ::HIR::Path(struct_ty.clone(), m_resolve.m_lang_Drop, "drop") ) << "(rv);\n"; } - + else if( const auto* ity = m_resolve.is_type_owned_box(struct_ty) ) + { + // Obtain inner pointer + // TODO: This is very specific to the structure of the official liballoc's Box. + ::HIR::TypeRef inner_ptr = ::HIR::TypeRef::new_pointer( ::HIR::BorrowType::Unique, ity->clone() ); + m_of << "\t"; emit_ctype(inner_ptr, FMT_CB(ss, ss << "tmp0"; )); m_of << " = rv->_0._0._0;\n"; + // Call destructor of inner data + emit_destructor_call( ::MIR::LValue::make_Deref({ box$(::MIR::LValue::make_Temporary({0})) }), *ity, true); + // Emit a call to box_free for the type + ::HIR::GenericPath box_free { m_crate.get_lang_item_path(sp, "box_free"), { ity->clone() } }; + m_of << "\t" << Trans_Mangle(box_free) << "(tmp0);\n"; + } + auto self = ::MIR::LValue::make_Deref({ box$(::MIR::LValue::make_Return({})) }); auto fld_lv = ::MIR::LValue::make_Field({ box$(self), 0 }); TU_MATCHA( (item.m_data), (e), @@ -506,11 +535,16 @@ namespace { auto monomorph_cb_trait = monomorphise_type_get_cb(sp, &type, &trait_path.m_params, nullptr); - // TODO: Alignment and destructor + // Size, Alignment, and destructor + m_of << "{ "; + m_of << "sizeof("; emit_ctype(type); m_of << "),"; + m_of << "__alignof__("; emit_ctype(type); m_of << "),"; + // TODO: Drop glue + m_of << "}"; // No newline, added below + for(unsigned int i = 0; i < trait.m_value_indexes.size(); i ++ ) { - if( i != 0 ) - m_of << ",\n"; + m_of << ",\n"; for(const auto& m : trait.m_value_indexes) { if( m.second.first != i ) @@ -585,7 +619,7 @@ namespace { if( code->blocks[i].statements.size() == 0 && code->blocks[i].terminator.is_Diverge() ) { DEBUG("- Diverge only, omitting"); - m_of << "bb" << i << ": _Unwind_Resume(); // Diverge\n"; + m_of << "bb" << i << ": _Unwind_Resume(); // Diverge\n"; continue ; } @@ -602,10 +636,26 @@ namespace { ::HIR::TypeRef tmp; const auto& ty = mir_res.get_lvalue_type(tmp, e.slot); - if( e.kind == ::MIR::eDropKind::SHALLOW ) { - // TODO: Shallow drops are only valid on owned_box + switch( e.kind ) + { + case ::MIR::eDropKind::SHALLOW: + // Shallow drops are only valid on owned_box + if( const auto* ity = m_resolve.is_type_owned_box(ty) ) + { + // Emit a call to box_free for the type + ::HIR::GenericPath box_free { m_crate.get_lang_item_path(sp, "box_free"), { ity->clone() } }; + // TODO: This is specific to the official liballoc's owned_box + m_of << "\t" << Trans_Mangle(box_free) << "("; emit_lvalue(e.slot); m_of << "._0._0._0);\n"; + } + else + { + MIR_BUG(mir_res, "Shallow drop on non-Box - " << ty); + } + break; + case ::MIR::eDropKind::DEEP: + emit_destructor_call(e.slot, ty, false); + break; } - emit_destructor_call(e.slot, ty, false); } else { const auto& e = stmt.as_Assign(); @@ -959,11 +1009,13 @@ namespace { case MetadataType::None: m_of << "sizeof("; emit_ctype(ty); m_of << ")"; break; - case MetadataType::Slice: - MIR_TODO(mir_res, "size_of_val - " << ty); - break; + case MetadataType::Slice: { + // TODO: Have a function that fetches the inner type for types like `Path` or `str` + const auto& ity = *ty.m_data.as_Slice().inner; + emit_lvalue(e.args.at(0)); m_of << ".META * sizeof("; emit_ctype(ity); m_of << ")"; + break; } case MetadataType::TraitObject: - MIR_TODO(mir_res, "size_of_val - " << ty); + m_of << "((VTABLE_HDR*)"; emit_lvalue(e.args.at(0)); m_of << ".META)->size"; break; } } @@ -974,11 +1026,13 @@ namespace 
{ case MetadataType::None: m_of << "__alignof__("; emit_ctype(ty); m_of << ")"; break; - case MetadataType::Slice: - MIR_TODO(mir_res, "min_align_of_val - " << ty); - break; + case MetadataType::Slice: { + // TODO: Have a function that fetches the inner type for types like `Path` or `str` + const auto& ity = *ty.m_data.as_Slice().inner; + m_of << "__alignof__("; emit_ctype(ity); m_of << ")"; + break; } case MetadataType::TraitObject: - MIR_TODO(mir_res, "min_align_of_val - " << ty); + m_of << "((VTABLE_HDR*)"; emit_lvalue(e.args.at(0)); m_of << ".META)->align"; break; } } @@ -1097,6 +1151,14 @@ namespace { else if( name == "atomic_cxchg_acqrel_failrelaxed" ) { emit_atomic_cxchg(e, "memory_order_acq_rel", "memory_order_relaxed"); } + // _rel = Release, Relaxed (not Release,Release) + else if( name == "atomic_cxchg_rel" ) { + emit_atomic_cxchg(e, "memory_order_release", "memory_order_relaxed"); + } + // _acqrel = Release, Acquire (not AcqRel,AcqRel) + else if( name == "atomic_cxchg_acqrel" ) { + emit_atomic_cxchg(e, "memory_order_acq_rel", "memory_order_acquire"); + } else if( name.compare(0, 7+6+4, "atomic_cxchg_fail") == 0 ) { auto fail_ordering = H::get_atomic_ordering(mir_res, name, 7+6+4); emit_atomic_cxchg(e, "memory_order_seq_cst", fail_ordering); diff --git a/src/trans/enumerate.cpp b/src/trans/enumerate.cpp index 78270b7b..81b02199 100644 --- a/src/trans/enumerate.cpp +++ b/src/trans/enumerate.cpp @@ -287,6 +287,7 @@ namespace { // Enumerate types required for the enumerated items void Trans_Enumerate_Types(TransList& out, const ::HIR::Crate& crate) { + static Span sp; TypeVisitor tv { crate, out.m_types }; unsigned int types_count = 0; @@ -349,11 +350,18 @@ void Trans_Enumerate_Types(TransList& out, const ::HIR::Crate& crate) if( markings_ptr->has_drop_impl ) { // Add the Drop impl to the codegen list - Trans_Enumerate_FillFrom_Path(out, crate, ::HIR::Path( ty.clone(), crate.get_lang_item_path(Span(), "drop"), "drop"), {}); - + Trans_Enumerate_FillFrom_Path(out, crate, ::HIR::Path( ty.clone(), crate.get_lang_item_path(sp, "drop"), "drop"), {}); constructors_added = true; } } + + if( const auto* ity = tv.m_resolve.is_type_owned_box(ty) ) + { + // Reqire drop glue for inner type. + // - Should that already exist? 
+ // Requires box_free lang item + Trans_Enumerate_FillFrom_Path(out, crate, ::HIR::GenericPath( crate.get_lang_item_path(sp, "box_free"), { ity->clone() } ), {}); + } } types_count = out.m_types.size(); } while(constructors_added); } diff --git a/src/trans/mangling.cpp b/src/trans/mangling.cpp index c14b1b29..4ea56581 100644 --- a/src/trans/mangling.cpp +++ b/src/trans/mangling.cpp @@ -51,14 +51,20 @@ namespace { } -::FmtLambda Trans_Mangle(const ::HIR::GenericPath& path) +::FmtLambda Trans_Mangle(const ::HIR::SimplePath& path) { return FMT_CB(ss, - ss << "_ZN" << path.m_path.m_crate_name.size() << path.m_path.m_crate_name; - for(const auto& comp : path.m_path.m_components) { + ss << "_ZN" << path.m_crate_name.size() << path.m_crate_name; + for(const auto& comp : path.m_components) { auto v = escape_str(comp); ss << v.size() << v; } + ); +} +::FmtLambda Trans_Mangle(const ::HIR::GenericPath& path) +{ + return FMT_CB(ss, + ss << Trans_Mangle(path.m_path); ss << emit_params(path.m_params); ); } diff --git a/src/trans/mangling.hpp b/src/trans/mangling.hpp index ad6c9add..e1b6e35e 100644 --- a/src/trans/mangling.hpp +++ b/src/trans/mangling.hpp @@ -10,11 +10,13 @@ #include <debug.hpp> namespace HIR { + class SimplePath; class GenericPath; class Path; class TypeRef; } +extern ::FmtLambda Trans_Mangle(const ::HIR::SimplePath& path); extern ::FmtLambda Trans_Mangle(const ::HIR::GenericPath& path); extern ::FmtLambda Trans_Mangle(const ::HIR::Path& path); extern ::FmtLambda Trans_Mangle(const ::HIR::TypeRef& ty);
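Finally, an editor's sketch (not part of the commit) of the Box behaviour the codegen and enumeration changes above implement: a DEEP drop of a Box runs the inner destructor and then frees the allocation via the box_free lang item, while a SHALLOW drop (used once the contents have been moved out) frees the allocation only:

    fn main() {
        let b: Box<String> = Box::new(String::from("hi"));
        drop(b);                 // DEEP: String's destructor runs, then box_free

        let c: Box<String> = Box::new(String::from("bye"));
        let inner: String = *c;  // moving out of the Box leaves `c` InnerMoved
        println!("{}", inner);
        // End of scope: only a SHALLOW drop of `c` is emitted, i.e. box_free
        // without running String's destructor a second time.
    }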