-rw-r--r--  src/hir/dump.cpp            |  2
-rw-r--r--  src/mir/check.cpp           | 20
-rw-r--r--  src/mir/dump.cpp            | 11
-rw-r--r--  src/mir/from_hir.cpp        | 14
-rw-r--r--  src/mir/from_hir_match.cpp  |  3
-rw-r--r--  src/mir/mir_builder.cpp     | 14
-rw-r--r--  src/trans/codegen_c.cpp     | 96
-rw-r--r--  src/trans/enumerate.cpp     | 12
-rw-r--r--  src/trans/mangling.cpp      | 12
-rw-r--r--  src/trans/mangling.hpp      |  2
10 files changed, 155 insertions, 31 deletions
diff --git a/src/hir/dump.cpp b/src/hir/dump.cpp
index ea113a00..5be6ab92 100644
--- a/src/hir/dump.cpp
+++ b/src/hir/dump.cpp
@@ -678,7 +678,7 @@ namespace {
     private:
         RepeatLitStr indent() const {
-            return RepeatLitStr { " ", static_cast<int>(m_indent_level) };
+            return RepeatLitStr { " ", static_cast<int>(m_indent_level) };
         }
         void inc_indent() {
             m_indent_level ++;
diff --git a/src/mir/check.cpp b/src/mir/check.cpp
index 186e93fb..d27049ea 100644
--- a/src/mir/check.cpp
+++ b/src/mir/check.cpp
@@ -265,7 +265,9 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
             val_state.ensure_valid(state, se.val);
             ),
         (Cast,
-            val_state.move_val(state, se.val);
+            // Well.. it's not exactly moved...
+            val_state.ensure_valid(state, se.val);
+            //val_state.move_val(state, se.val);
             ),
         (BinOp,
             val_state.move_val(state, se.val_l);
@@ -342,14 +344,26 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
            ),
        (If,
            // Push blocks
+           val_state.ensure_valid( state, e.cond );
            to_visit_blocks.push_back( ::std::make_pair(e.bb0, val_state) );
            to_visit_blocks.push_back( ::std::make_pair(e.bb1, ::std::move(val_state)) );
            ),
        (Switch,
-           // TODO: Push blocks
+           val_state.ensure_valid( state, e.val );
+           for(const auto& tgt : e.targets)
+           {
+               to_visit_blocks.push_back( ::std::make_pair(tgt, val_state) );
+           }
            ),
        (Call,
-           // TODO: Push blocks (with return valid only in one)
+           if( e.fcn.is_Value() )
+               val_state.ensure_valid( state, e.fcn.as_Value() );
+           for(const auto& arg : e.args)
+               val_state.ensure_valid( state, arg );
+           // Push blocks (with return valid only in one)
+           to_visit_blocks.push_back( ::std::make_pair(e.panic_block, val_state) );
+           val_state.mark_validity( state, e.ret_val, true );
+           to_visit_blocks.push_back( ::std::make_pair(e.ret_block, ::std::move(val_state)) );
            )
        )
    }
diff --git a/src/mir/dump.cpp b/src/mir/dump.cpp
index 555093f2..d143c2c9 100644
--- a/src/mir/dump.cpp
+++ b/src/mir/dump.cpp
@@ -165,7 +165,16 @@ namespace {
             ),
         (Drop,
             DEBUG("- DROP " << e.slot);
-            m_os << "drop(" << FMT_M(e.slot) << ");\n";
+            m_os << "drop(" << FMT_M(e.slot);
+            switch( e.kind )
+            {
+            case ::MIR::eDropKind::SHALLOW:
+                m_os << " SHALLOW";
+                break;
+            case ::MIR::eDropKind::DEEP:
+                break;
+            }
+            m_os << ");\n";
             )
         )
    }
diff --git a/src/mir/from_hir.cpp b/src/mir/from_hir.cpp
index e21ca993..48c0d948 100644
--- a/src/mir/from_hir.cpp
+++ b/src/mir/from_hir.cpp
@@ -355,7 +355,7 @@ namespace {
                 }
             }
 
-            // - For the last node, don't bother with a statement scope
+            // For the last node, handle it specially.
             if( node.m_yields_final )
             {
                 auto& subnode = node.m_nodes.back();
@@ -594,14 +594,17 @@ namespace {
             // 'true' branch
             {
+                auto stmt_scope = m_builder.new_scope_temp(node.m_true->span());
                 m_builder.set_cur_block(true_branch);
                 this->visit_node_ptr(node.m_true);
                 if( m_builder.block_active() || m_builder.has_result() )
                 {
                     m_builder.push_stmt_assign( node.span(), result_val.clone(), m_builder.get_result(node.m_true->span()) );
+                    m_builder.terminate_scope(node.span(), mv$(stmt_scope));
                     m_builder.end_split_arm(node.span(), scope, true);
                     m_builder.end_block( ::MIR::Terminator::make_Goto(next_block) );
                 }
                 else {
+                    { auto _ = mv$(stmt_scope); }
                     m_builder.end_split_arm(node.span(), scope, false);
                 }
             }
@@ -610,14 +613,17 @@ namespace {
             m_builder.set_cur_block(false_branch);
             if( node.m_false )
             {
+                auto stmt_scope = m_builder.new_scope_temp(node.m_false->span());
                 this->visit_node_ptr(node.m_false);
                 if( m_builder.block_active() )
                 {
                     m_builder.push_stmt_assign( node.span(), result_val.clone(), m_builder.get_result(node.m_false->span()) );
+                    m_builder.terminate_scope(node.span(), mv$(stmt_scope));
                     m_builder.end_block( ::MIR::Terminator::make_Goto(next_block) );
                     m_builder.end_split_arm(node.span(), scope, true);
                 }
                 else {
+                    { auto _ = mv$(stmt_scope); }
                     m_builder.end_split_arm(node.span(), scope, false);
                 }
             }
@@ -1798,7 +1804,11 @@ namespace {
                 assert( !values_set[idx] );
                 values_set[idx] = true;
                 this->visit_node_ptr(valnode);
-                values.at(idx) = m_builder.lvalue_or_temp( valnode->span(), valnode->m_res_type, m_builder.get_result(valnode->span()) );
+
+                // NOTE: Have to allocate a new temporary because ordering matters
+                auto tmp = m_builder.new_temporary(valnode->m_res_type);
+                m_builder.push_stmt_assign( valnode->span(), tmp.clone(), m_builder.get_result(valnode->span()) );
+                values.at(idx) = mv$(tmp);
             }
             for(unsigned int i = 0; i < values.size(); i ++)
             {
diff --git a/src/mir/from_hir_match.cpp b/src/mir/from_hir_match.cpp
index d3c5a5eb..2a9a8dd8 100644
--- a/src/mir/from_hir_match.cpp
+++ b/src/mir/from_hir_match.cpp
@@ -251,6 +251,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
         // - Define variables from the first pattern
         conv.define_vars_from(node.span(), arm.m_patterns.front());
 
+        auto pat_scope = builder.new_scope_split(node.span());
         for( unsigned int pat_idx = 0; pat_idx < arm.m_patterns.size(); pat_idx ++ )
         {
             const auto& pat = arm.m_patterns[pat_idx];
@@ -271,9 +272,11 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
             ac.destructures.push_back( builder.new_bb_unlinked() );
             builder.set_cur_block( ac.destructures.back() );
             conv.destructure_from( arm.m_code->span(), pat, match_val.clone(), true );
+            builder.end_split_arm( arm.m_code->span(), pat_scope, true );
             builder.pause_cur_block();
             // NOTE: Paused block resumed upon successful match
         }
+        builder.terminate_scope( arm.m_code->span(), mv$(pat_scope) );
 
         // TODO: If this pattern ignores fields with Drop impls, this will lead to leaks.
         // - Ideally, this would trigger a drop of whatever wasn't already taken by the pattern.
diff --git a/src/mir/mir_builder.cpp b/src/mir/mir_builder.cpp
index cd7ddbcd..c0bf34a1 100644
--- a/src/mir/mir_builder.cpp
+++ b/src/mir/mir_builder.cpp
@@ -263,6 +263,7 @@ void MirBuilder::push_stmt_drop(const Span& sp, ::MIR::LValue val)
         return ;
     }
 
+    DEBUG("DROP " << val);
     m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Drop({ ::MIR::eDropKind::DEEP, mv$(val) }) );
 }
 void MirBuilder::push_stmt_drop_shallow(const Span& sp, ::MIR::LValue val)
@@ -276,6 +277,7 @@ void MirBuilder::push_stmt_drop_shallow(const Span& sp, ::MIR::LValue val)
     //    return ;
     //}
 
+    DEBUG("DROP shallow " << val);
     m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Drop({ ::MIR::eDropKind::SHALLOW, mv$(val) }) );
 }
 
@@ -341,9 +343,15 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val)
     TU_MATCH_DEF(::MIR::LValue, (val), (e),
     (
         ),
+    // TODO: This may not be correct, because it can change the drop points and ordering
+    // HACK: Working around cases where values are dropped while the result is not yet used.
     (Deref,
-        // TODO: This may not be correct, because it can change the drop points and ordering
-        // HACK: Working around cases where values are dropped while the result is not yet used.
         raise_variables(sp, *e.val);
         ),
+    (Field,
+        raise_variables(sp, *e.val);
+        ),
+    (Downcast,
+        raise_variables(sp, *e.val);
+        ),
     // Actual value types
@@ -821,6 +829,7 @@ void MirBuilder::complete_scope(ScopeDef& sd)
                 DEBUG(i << " (_,"<<new_state<<")");
                 m_changed[i] = true;
                 m_new_states[i] = new_state;
+                // TODO: Store the original state for comparison?
             }
             else
             {
@@ -1223,6 +1232,7 @@ void MirBuilder::drop_scope_values(const ScopeDef& sd)
                 break;
             case VarState::InnerMoved:
                 push_stmt_drop_shallow( sd.span, ::MIR::LValue::make_Variable(var_idx) );
+                set_variable_state(sd.span, var_idx, VarState::Dropped);
                 break;
            case VarState::MaybeMoved:
                //TODO(sd.span, "Include drop flags");
diff --git a/src/trans/codegen_c.cpp b/src/trans/codegen_c.cpp
index 37acb33b..8836af54 100644
--- a/src/trans/codegen_c.cpp
+++ b/src/trans/codegen_c.cpp
@@ -50,6 +50,7 @@ namespace {
                 << "typedef struct { } tTYPEID;\n"
                 << "typedef struct { void* PTR; size_t META; } SLICE_PTR;\n"
                 << "typedef struct { void* PTR; void* META; } TRAITOBJ_PTR;\n"
+                << "typedef struct { size_t size; size_t align; } VTABLE_HDR;\n"
                 << "\n"
                 << "extern void _Unwind_Resume(void);\n"
                 << "\n"
@@ -134,7 +135,17 @@ namespace {
                     emit_ctype( monomorph(ty), inner );
                 }
                 };
+            m_of << "// struct " << p << "\n";
             m_of << "struct s_" << Trans_Mangle(p) << " {\n";
+
+            // HACK: For vtables, insert the alignment and size at the start
+            {
+                const auto& lc = p.m_path.m_components.back();
+                if( lc.size() > 7 && ::std::strcmp(lc.c_str() + lc.size() - 7, "#vtable") == 0 ) {
+                    m_of << "\tVTABLE_HDR hdr;\n";
+                }
+            }
+
             TU_MATCHA( (item.m_data), (e),
             (Unit,
                 ),
@@ -190,14 +201,32 @@ namespace {
             if( item.m_markings.has_drop_impl ) {
                 m_of << "tUNIT " << Trans_Mangle( ::HIR::Path(struct_ty.clone(), m_resolve.m_lang_Drop, "drop") ) << "(struct s_" << Trans_Mangle(p) << "*rv);\n";
             }
-
+            else if( const auto* ity = m_resolve.is_type_owned_box(struct_ty) )
+            {
+                ::HIR::TypeRef inner_ptr = ::HIR::TypeRef::new_pointer( ::HIR::BorrowType::Unique, ity->clone() );
+                ::HIR::GenericPath box_free { m_crate.get_lang_item_path(sp, "box_free"), { ity->clone() } };
+                m_of << "tUNIT " << Trans_Mangle(box_free) << "("; emit_ctype(inner_ptr, FMT_CB(ss, ss << "tmp0"; )); m_of << ");\n";
+            }
+
             m_of << "void " << Trans_Mangle(drop_glue_path) << "(struct s_" << Trans_Mangle(p) << "* rv) {\n";
             // If this type has an impl of Drop, call that impl
             if( item.m_markings.has_drop_impl ) {
                 m_of << "\t" << Trans_Mangle( ::HIR::Path(struct_ty.clone(), m_resolve.m_lang_Drop, "drop") ) << "(rv);\n";
             }
-
+            else if( const auto* ity = m_resolve.is_type_owned_box(struct_ty) )
+            {
+                // Obtain inner pointer
+                // TODO: This is very specific to the structure of the official liballoc's Box.
+                ::HIR::TypeRef inner_ptr = ::HIR::TypeRef::new_pointer( ::HIR::BorrowType::Unique, ity->clone() );
+                m_of << "\t"; emit_ctype(inner_ptr, FMT_CB(ss, ss << "tmp0"; )); m_of << " = rv->_0._0._0;\n";
+                // Call destructor of inner data
+                emit_destructor_call( ::MIR::LValue::make_Deref({ box$(::MIR::LValue::make_Temporary({0})) }), *ity, true);
+                // Emit a call to box_free for the type
+                ::HIR::GenericPath box_free { m_crate.get_lang_item_path(sp, "box_free"), { ity->clone() } };
+                m_of << "\t" << Trans_Mangle(box_free) << "(tmp0);\n";
+            }
+
             auto self = ::MIR::LValue::make_Deref({ box$(::MIR::LValue::make_Return({})) });
             auto fld_lv = ::MIR::LValue::make_Field({ box$(self), 0 });
             TU_MATCHA( (item.m_data), (e),
@@ -506,11 +535,16 @@ namespace {
 
             auto monomorph_cb_trait = monomorphise_type_get_cb(sp, &type, &trait_path.m_params, nullptr);
 
-            // TODO: Alignment and destructor
+            // Size, Alignment, and destructor
+            m_of << "{ ";
+            m_of << "sizeof("; emit_ctype(type); m_of << "),";
+            m_of << "__alignof__("; emit_ctype(type); m_of << "),";
+            // TODO: Drop glue
+            m_of << "}";    // No newline, added below
+
             for(unsigned int i = 0; i < trait.m_value_indexes.size(); i ++ )
             {
-                if( i != 0 )
-                    m_of << ",\n";
+                m_of << ",\n";
                 for(const auto& m : trait.m_value_indexes)
                 {
                     if( m.second.first != i )
@@ -585,7 +619,7 @@ namespace {
                 if( code->blocks[i].statements.size() == 0 && code->blocks[i].terminator.is_Diverge() )
                 {
                     DEBUG("- Diverge only, omitting");
-                    m_of << "bb" << i << ": _Unwind_Resume(); // Diverge\n";
+                    m_of << "bb" << i << ": _Unwind_Resume(); // Diverge\n";
                     continue ;
                 }
 
@@ -602,10 +636,26 @@ namespace {
                     ::HIR::TypeRef tmp;
                     const auto& ty = mir_res.get_lvalue_type(tmp, e.slot);
 
-                    if( e.kind == ::MIR::eDropKind::SHALLOW ) {
-                        // TODO: Shallow drops are only valid on owned_box
+                    switch( e.kind )
+                    {
+                    case ::MIR::eDropKind::SHALLOW:
+                        // Shallow drops are only valid on owned_box
+                        if( const auto* ity = m_resolve.is_type_owned_box(ty) )
+                        {
+                            // Emit a call to box_free for the type
+                            ::HIR::GenericPath box_free { m_crate.get_lang_item_path(sp, "box_free"), { ity->clone() } };
+                            // TODO: This is specific to the official liballoc's owned_box
+                            m_of << "\t" << Trans_Mangle(box_free) << "("; emit_lvalue(e.slot); m_of << "._0._0._0);\n";
+                        }
+                        else
+                        {
+                            MIR_BUG(mir_res, "Shallow drop on non-Box - " << ty);
+                        }
+                        break;
+                    case ::MIR::eDropKind::DEEP:
+                        emit_destructor_call(e.slot, ty, false);
+                        break;
                     }
-                    emit_destructor_call(e.slot, ty, false);
                 }
                 else {
                     const auto& e = stmt.as_Assign();
@@ -959,11 +1009,13 @@ namespace {
                 case MetadataType::None:
                     m_of << "sizeof("; emit_ctype(ty); m_of << ")";
                     break;
-                case MetadataType::Slice:
-                    MIR_TODO(mir_res, "size_of_val - " << ty);
-                    break;
+                case MetadataType::Slice: {
+                    // TODO: Have a function that fetches the inner type for types like `Path` or `str`
+                    const auto& ity = *ty.m_data.as_Slice().inner;
+                    emit_lvalue(e.args.at(0)); m_of << ".META * sizeof("; emit_ctype(ity); m_of << ")";
+                    break; }
                 case MetadataType::TraitObject:
-                    MIR_TODO(mir_res, "size_of_val - " << ty);
+                    m_of << "((VTABLE_HDR*)"; emit_lvalue(e.args.at(0)); m_of << ".META)->size";
                     break;
                 }
             }
@@ -974,11 +1026,13 @@ namespace {
                 case MetadataType::None:
                     m_of << "__alignof__("; emit_ctype(ty); m_of << ")";
                     break;
-                case MetadataType::Slice:
-                    MIR_TODO(mir_res, "min_align_of_val - " << ty);
-                    break;
+                case MetadataType::Slice: {
+                    // TODO: Have a function that fetches the inner type for types like `Path` or `str`
+                    const auto& ity = *ty.m_data.as_Slice().inner;
+                    m_of << "__alignof__("; emit_ctype(ity); m_of << ")";
+                    break; }
                 case MetadataType::TraitObject:
-                    MIR_TODO(mir_res, "min_align_of_val - " << ty);
+                    m_of << "((VTABLE_HDR*)"; emit_lvalue(e.args.at(0)); m_of << ".META)->align";
                     break;
                 }
             }
@@ -1097,6 +1151,14 @@ namespace {
                 else if( name == "atomic_cxchg_acqrel_failrelaxed" ) {
                     emit_atomic_cxchg(e, "memory_order_acq_rel", "memory_order_relaxed");
                 }
+                // _rel = Release, Relaxed (not Release,Release)
+                else if( name == "atomic_cxchg_rel" ) {
+                    emit_atomic_cxchg(e, "memory_order_release", "memory_order_relaxed");
+                }
+                // _acqrel = AcqRel, Acquire (not AcqRel,AcqRel)
+                else if( name == "atomic_cxchg_acqrel" ) {
+                    emit_atomic_cxchg(e, "memory_order_acq_rel", "memory_order_acquire");
+                }
                 else if( name.compare(0, 7+6+4, "atomic_cxchg_fail") == 0 ) {
                     auto fail_ordering = H::get_atomic_ordering(mir_res, name, 7+6+4);
                     emit_atomic_cxchg(e, "memory_order_seq_cst", fail_ordering);
diff --git a/src/trans/enumerate.cpp b/src/trans/enumerate.cpp
index 78270b7b..81b02199 100644
--- a/src/trans/enumerate.cpp
+++ b/src/trans/enumerate.cpp
@@ -287,6 +287,7 @@ namespace {
 // Enumerate types required for the enumerated items
 void Trans_Enumerate_Types(TransList& out, const ::HIR::Crate& crate)
 {
+    static Span sp;
     TypeVisitor tv { crate, out.m_types };
 
     unsigned int types_count = 0;
@@ -349,11 +350,18 @@ void Trans_Enumerate_Types(TransList& out, const ::HIR::Crate& crate)
                 if( markings_ptr->has_drop_impl )
                 {
                     // Add the Drop impl to the codegen list
-                    Trans_Enumerate_FillFrom_Path(out, crate, ::HIR::Path( ty.clone(), crate.get_lang_item_path(Span(), "drop"), "drop"), {});
-
+                    Trans_Enumerate_FillFrom_Path(out, crate, ::HIR::Path( ty.clone(), crate.get_lang_item_path(sp, "drop"), "drop"), {});
                     constructors_added = true;
                 }
             }
+
+            if( const auto* ity = tv.m_resolve.is_type_owned_box(ty) )
+            {
+                // Require drop glue for inner type.
+                // - Should that already exist?
+                // Requires box_free lang item
+                Trans_Enumerate_FillFrom_Path(out, crate, ::HIR::GenericPath( crate.get_lang_item_path(sp, "box_free"), { ity->clone() } ), {});
+            }
         }
         types_count = out.m_types.size();
     } while(constructors_added);
diff --git a/src/trans/mangling.cpp b/src/trans/mangling.cpp
index c14b1b29..4ea56581 100644
--- a/src/trans/mangling.cpp
+++ b/src/trans/mangling.cpp
@@ -51,14 +51,20 @@ namespace {
 }
 
-::FmtLambda Trans_Mangle(const ::HIR::GenericPath& path)
+::FmtLambda Trans_Mangle(const ::HIR::SimplePath& path)
 {
     return FMT_CB(ss,
-        ss << "_ZN" << path.m_path.m_crate_name.size() << path.m_path.m_crate_name;
-        for(const auto& comp : path.m_path.m_components) {
+        ss << "_ZN" << path.m_crate_name.size() << path.m_crate_name;
+        for(const auto& comp : path.m_components) {
             auto v = escape_str(comp);
             ss << v.size() << v;
         }
+        );
+}
+::FmtLambda Trans_Mangle(const ::HIR::GenericPath& path)
+{
+    return FMT_CB(ss,
+        ss << Trans_Mangle(path.m_path);
         ss << emit_params(path.m_params);
         );
 }
 
diff --git a/src/trans/mangling.hpp b/src/trans/mangling.hpp
index ad6c9add..e1b6e35e 100644
--- a/src/trans/mangling.hpp
+++ b/src/trans/mangling.hpp
@@ -10,11 +10,13 @@
 #include <debug.hpp>
 
 namespace HIR {
+    class SimplePath;
     class GenericPath;
     class Path;
     class TypeRef;
 }
 
+extern ::FmtLambda Trans_Mangle(const ::HIR::SimplePath& path);
 extern ::FmtLambda Trans_Mangle(const ::HIR::GenericPath& path);
 extern ::FmtLambda Trans_Mangle(const ::HIR::Path& path);
 extern ::FmtLambda Trans_Mangle(const ::HIR::TypeRef& ty);
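
The VTABLE_HDR change gives every "#vtable" struct a size/align header that the size_of_val and min_align_of_val intrinsics can read back through a trait object's metadata pointer, while slices compute their size from the element count in META. A minimal sketch of roughly what the emitted C amounts to; the struct and function names are illustrative stand-ins, not the real Trans_Mangle'd output:

    #include <stddef.h>

    /* Mirrors the new prelude typedef: every vtable now leads with the concrete type's size and alignment. */
    typedef struct { size_t size; size_t align; } VTABLE_HDR;
    typedef struct { void* PTR; void* META; } TRAITOBJ_PTR;   /* &dyn Trait fat pointer */
    typedef struct { void* PTR; size_t META; } SLICE_PTR;     /* &[T] fat pointer */

    /* Illustrative vtable for a one-method trait impl; the real struct is the mangled
     * "...#vtable" type, and its hdr is emitted as "{ sizeof(T), __alignof__(T), }". */
    struct example_vtable {
        VTABLE_HDR hdr;
        void (*method_0)(void*);
    };

    /* What the new size_of_val / min_align_of_val lowering expands to for trait objects... */
    static size_t size_of_val_dyn(TRAITOBJ_PTR v)      { return ((VTABLE_HDR*)v.META)->size; }
    static size_t min_align_of_val_dyn(TRAITOBJ_PTR v) { return ((VTABLE_HDR*)v.META)->align; }

    /* ...and for slices, where META is the element count (element type shown as int for illustration). */
    static size_t size_of_val_slice(SLICE_PTR v)       { return v.META * sizeof(int); }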
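
The drop-glue changes special-case Box (detected via is_type_owned_box): deep drop reaches the raw pointer through liballoc's _0._0._0 field chain, runs the inner type's destructor, then calls the box_free lang item, while a MIR Drop(SHALLOW) lowers to a bare box_free call. A sketch of that shape under those assumptions, with hypothetical identifiers standing in for the mangled symbols:

    #include <stdint.h>

    /* Layout assumed by the "rv->_0._0._0" access: Box -> Unique -> NonZero -> raw pointer.
     * All names below are illustrative; real output uses Trans_Mangle'd symbols. */
    struct s_Box_i32 { struct { struct { int32_t* _0; } _0; } _0; };

    static void drop_glue_i32(int32_t* p) { (void)p; /* i32 needs no destructor; kept for shape only */ }
    static void box_free_i32(int32_t* p)  { (void)p; /* lang item "box_free": frees the allocation without dropping */ }

    /* DEEP drop of a Box: pull out the inner pointer, destroy the pointee, then free the allocation. */
    static void drop_glue_Box_i32(struct s_Box_i32* rv) {
        int32_t* tmp0 = rv->_0._0._0;
        drop_glue_i32(tmp0);
        box_free_i32(tmp0);
    }

    /* A MIR Drop(SHALLOW) on a Box (contents already moved out) now emits just the box_free call. */
    static void shallow_drop_Box_i32(struct s_Box_i32* slot) {
        box_free_i32(slot->_0._0._0);
    }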
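
The two new compare-exchange spellings encode their failure ordering implicitly: atomic_cxchg_rel is Release on success with Relaxed on failure, and atomic_cxchg_acqrel is AcqRel on success with Acquire on failure. Assuming emit_atomic_cxchg ultimately lowers to a standard compare-exchange (that helper is outside this diff), the ordering pairs correspond to:

    #include <atomic>

    /* atomic_cxchg_rel: Release on success, Relaxed on failure */
    static bool cxchg_rel(std::atomic<int>& a, int& expected, int desired) {
        return a.compare_exchange_strong(expected, desired,
                                         std::memory_order_release,
                                         std::memory_order_relaxed);
    }

    /* atomic_cxchg_acqrel: AcqRel on success, Acquire on failure */
    static bool cxchg_acqrel(std::atomic<int>& a, int& expected, int desired) {
        return a.compare_exchange_strong(expected, desired,
                                         std::memory_order_acq_rel,
                                         std::memory_order_acquire);
    }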
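
Splitting out a SimplePath overload of Trans_Mangle lets the GenericPath overload delegate for the un-parameterised part of the name and then append the type parameters. A simplified, self-contained rendition of that scheme (escape_str and emit_params are omitted, and the SimplePath stand-in below is not the real HIR type):

    #include <sstream>
    #include <string>
    #include <vector>

    // Simplified stand-in for ::HIR::SimplePath (crate name plus path components).
    struct SimplePath { std::string m_crate_name; std::vector<std::string> m_components; };

    // Mirrors the new Trans_Mangle(const ::HIR::SimplePath&): crate name and each (escaped)
    // component prefixed with its length, Itanium-style.
    std::string mangle_simple(const SimplePath& path) {
        std::ostringstream ss;
        ss << "_ZN" << path.m_crate_name.size() << path.m_crate_name;
        for (const auto& comp : path.m_components)
            ss << comp.size() << comp;
        return ss.str();
    }

    // mangle_simple({"core", {"option", "Option"}}) yields "_ZN4core6option6Option";
    // the GenericPath overload now emits this prefix and appends the mangled parameters.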