author     ubsan <npmazzuca@gmail.com>  2016-12-24 19:07:08 -0700
committer  ubsan <npmazzuca@gmail.com>  2016-12-24 19:07:08 -0700
commit     0c14c734fa32014fd24297ccdbed927016185ffd (patch)
tree       ed4bbff4be39c44c57164641f4ed32b5ad4675f0 /src/mir
parent     d12a8a886caf2e0edf33c1af831b1df990d2c892 (diff)
download   mrust-0c14c734fa32014fd24297ccdbed927016185ffd.tar.gz
No more tears!
No more DOS line endings or trailing whitespace either
Diffstat (limited to 'src/mir')
-rw-r--r--  src/mir/check.cpp             26
-rw-r--r--  src/mir/cleanup.cpp          164
-rw-r--r--  src/mir/dump.cpp              46
-rw-r--r--  src/mir/from_hir.cpp         310
-rw-r--r--  src/mir/from_hir.hpp          54
-rw-r--r--  src/mir/from_hir_match.cpp   454
-rw-r--r--  src/mir/helpers.cpp            2
-rw-r--r--  src/mir/helpers.hpp           14
-rw-r--r--  src/mir/mir.cpp                8
-rw-r--r--  src/mir/mir.hpp                8
-rw-r--r--  src/mir/mir_builder.cpp       98
-rw-r--r--  src/mir/mir_ptr.hpp            8
-rw-r--r--  src/mir/optimise.cpp          26
-rw-r--r--  src/mir/visit_crate_mir.cpp    6
-rw-r--r--  src/mir/visit_crate_mir.hpp    6
15 files changed, 615 insertions(+), 615 deletions(-)
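
The diff below is whitespace-only: every touched line loses its trailing spaces/tabs and any carriage returns from DOS line endings, and the code is otherwise unchanged. As a rough illustration of the normalisation this commit applies (a minimal sketch only; the actual script or editor setting used is not recorded on this page):

    // Minimal sketch (illustrative, not the tool actually used for this commit):
    // rewrite a file with trailing whitespace and DOS '\r' characters stripped.
    #include <fstream>
    #include <iostream>
    #include <sstream>
    #include <string>

    int main(int argc, char* argv[])
    {
        if( argc != 2 ) {
            std::cerr << "usage: strip_ws <file>\n";
            return 1;
        }
        std::ifstream in(argv[1]);
        std::ostringstream out;
        std::string line;
        while( std::getline(in, line) ) {
            // getline() drops the '\n'; also drop trailing spaces, tabs, and '\r'
            auto last = line.find_last_not_of(" \t\r");
            line.erase(last == std::string::npos ? 0 : last + 1);
            out << line << '\n';
        }
        in.close();
        std::ofstream rewritten(argv[1], std::ios::trunc);
        rewritten << out.str();
        return 0;
    }

Applied across src/mir, this kind of rewrite accounts for the matched 615 insertions and 615 deletions in the diffstat above.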
diff --git a/src/mir/check.cpp b/src/mir/check.cpp
index 65473b50..186e93fb 100644
--- a/src/mir/check.cpp
+++ b/src/mir/check.cpp
@@ -35,10 +35,10 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
continue ;
}
visited_bbs[block] = true;
-
-
+
+
state.set_cur_stmt_term(block);
-
+
#define PUSH_BB(idx, desc) do {\
if( !(idx < fcn.blocks.size() ) ) MIR_BUG(state, "Invalid target block - " << desc << " bb" << idx);\
if( visited_bbs[idx] == false ) {\
@@ -81,7 +81,7 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
DEBUG("- Function doesn't return.");
}
}
-
+
// [ValState] = Value state tracking (use after move, uninit, ...)
// - [ValState] No drops or usage of uninitalised values (Uninit, Moved, or Dropped)
// - [ValState] Temporaries are write-once.
@@ -98,7 +98,7 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
::std::vector<State> arguments;
::std::vector<State> temporaries;
::std::vector<State> variables;
-
+
ValStates() {}
ValStates(size_t n_args, size_t n_temps, size_t n_vars):
arguments(n_args, State::Valid),
@@ -106,11 +106,11 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
variables(n_vars)
{
}
-
+
bool empty() const {
return arguments.empty() && temporaries.empty() && variables.empty();
}
-
+
bool merge(ValStates& other)
{
if( this->empty() )
@@ -131,7 +131,7 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
return rv;
}
}
-
+
void mark_validity(const ::MIR::TypeResolve& state, const ::MIR::LValue& lv, bool is_valid)
{
TU_MATCH_DEF( ::MIR::LValue, (lv), (e),
@@ -228,7 +228,7 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
auto val_state = mv$( to_visit_blocks.back().second );
to_visit_blocks.pop_back();
assert(block < fcn.blocks.size());
-
+
// 1. Apply current state to `block_start_states` (merging if needed)
// - If no change happened, skip.
if( ! block_start_states.at(block).merge( val_state ) ) {
@@ -241,7 +241,7 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
{
const auto& stmt = bb.statements[stmt_idx];
state.set_cur_stmt(block, stmt_idx);
-
+
if( stmt.is_Drop() )
{
// Invalidate the slot
@@ -366,14 +366,14 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
{
const auto& stmt = bb.statements[stmt_idx];
state.set_cur_stmt(bb_idx, stmt_idx);
-
+
switch( stmt.tag() )
{
case ::MIR::Statement::TAGDEAD:
throw "";
case ::MIR::Statement::TAG_Assign: {
const auto& a = stmt.as_Assign();
-
+
auto check_type = [&](const auto& src_ty) {
::HIR::TypeRef tmp;
const auto& dst_ty = state.get_lvalue_type(tmp, a.dst);
@@ -528,7 +528,7 @@ void MIR_Validate(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
break;
}
}
-
+
state.set_cur_stmt_term(bb_idx);
TU_MATCH(::MIR::Terminator, (bb.terminator), (e),
(Incomplete,
diff --git a/src/mir/cleanup.cpp b/src/mir/cleanup.cpp
index 972914a6..5744a033 100644
--- a/src/mir/cleanup.cpp
+++ b/src/mir/cleanup.cpp
@@ -23,32 +23,32 @@ struct MirMutator
unsigned int cur_block;
unsigned int cur_stmt;
mutable ::std::vector< ::MIR::Statement> new_statements;
-
+
MirMutator(::MIR::Function& fcn, unsigned int bb, unsigned int stmt):
m_fcn(fcn),
cur_block(bb), cur_stmt(stmt)
{
}
-
+
::MIR::LValue new_temporary(::HIR::TypeRef ty)
{
auto rv = ::MIR::LValue::make_Temporary({ static_cast<unsigned int>(m_fcn.temporaries.size()) });
m_fcn.temporaries.push_back( mv$(ty) );
return rv;
}
-
+
void push_statement(::MIR::Statement stmt)
{
new_statements.push_back( mv$(stmt) );
}
-
+
::MIR::LValue in_temporary(::HIR::TypeRef ty, ::MIR::RValue val)
{
auto rv = this->new_temporary( mv$(ty) );
push_statement( ::MIR::Statement::make_Assign({ rv.clone(), mv$(val) }) );
return rv;
}
-
+
decltype(new_statements.begin()) flush()
{
DEBUG("flush - " << cur_block << "/" << cur_stmt);
@@ -101,37 +101,37 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
MIR_ASSERT(state, lit.is_List(), "Non-list literal for Tuple - " << lit);
const auto& vals = lit.as_List();
MIR_ASSERT(state, vals.size() == te.size(), "Literal size mismatched with tuple size");
-
+
::std::vector< ::MIR::LValue> lvals;
lvals.reserve( vals.size() );
-
+
for(unsigned int i = 0; i < vals.size(); i ++)
{
auto rval = MIR_Cleanup_LiteralToRValue(state, mutator, vals[i], te[i].clone(), ::HIR::GenericPath());
lvals.push_back( mutator.in_temporary( mv$(te[i]), mv$(rval)) );
}
-
+
return ::MIR::RValue::make_Tuple({ mv$(lvals) });
),
(Array,
MIR_ASSERT(state, lit.is_List(), "Non-list literal for Array - " << lit);
const auto& vals = lit.as_List();
-
+
MIR_ASSERT(state, vals.size() == te.size_val, "Literal size mismatched with array size");
-
+
bool is_all_same = false;
if( vals.size() > 1 )
{
is_all_same = true;
for(unsigned int i = 1; i < vals.size(); i ++) {
-
+
if( vals[i] != vals[0] ) {
is_all_same = false;
break ;
}
}
}
-
+
if( is_all_same )
{
auto rval = MIR_Cleanup_LiteralToRValue(state, mutator, vals[0], te.inner->clone(), ::HIR::GenericPath());
@@ -142,13 +142,13 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
{
::std::vector< ::MIR::LValue> lvals;
lvals.reserve( vals.size() );
-
+
for(const auto& val: vals)
{
auto rval = MIR_Cleanup_LiteralToRValue(state, mutator, val, te.inner->clone(), ::HIR::GenericPath());
lvals.push_back( mutator.in_temporary(te.inner->clone(), mv$(rval)) );
}
-
+
return ::MIR::RValue::make_Array({ mv$(lvals) });
}
),
@@ -157,9 +157,9 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
{
const auto& str = *te.binding.as_Struct();
const auto& vals = lit.as_List();
-
+
auto monomorph = [&](const auto& tpl) { return monomorphise_type(state.sp, str.m_params, te.path.m_data.as_Generic().m_params, tpl); };
-
+
::std::vector< ::MIR::LValue> lvals;
TU_MATCHA( (str.m_data), (se),
(Unit,
@@ -190,9 +190,9 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
{
const auto& enm = *te.binding.as_Enum();
const auto& lit_var = lit.as_Variant();
-
+
auto monomorph = [&](const auto& tpl) { return monomorphise_type(state.sp, enm.m_params, te.path.m_data.as_Generic().m_params, tpl); };
-
+
::std::vector< ::MIR::LValue> lvals;
MIR_ASSERT(state, lit_var.idx < enm.m_variants.size(), "Variant index out of range");
TU_MATCHA( (enm.m_variants[lit_var.idx].second), (ve),
@@ -254,7 +254,7 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
),
(Pointer,
if( lit.is_BorrowOf() ) {
- // TODO:
+ // TODO:
MIR_TODO(state, "BorrowOf into pointer - " << lit << " into " << ty);
}
else {
@@ -272,11 +272,11 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
const auto& ty = state.get_static_type(tmp, path);
MIR_ASSERT(state, ty.m_data.is_Array(), "BorrowOf returning slice not of an array, instead " << ty);
unsigned int size = ty.m_data.as_Array().size_val;
-
+
auto ptr_type = ::HIR::TypeRef::new_borrow(::HIR::BorrowType::Shared,
(&ty == &tmp ? mv$(tmp) : ty.clone())
);
-
+
auto ptr_lval = mutator.in_temporary( mv$(ptr_type), ::MIR::Constant::make_ItemAddr(path.clone()) );
auto size_lval = mutator.in_temporary( ::HIR::CoreType::Usize, ::MIR::Constant::make_Uint(size) );
return ::MIR::RValue::make_MakeDst({ mv$(ptr_lval), mv$(size_lval) });
@@ -317,7 +317,7 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
{
assert( te.m_trait.m_trait_ptr );
const auto& trait = *te.m_trait.m_trait_ptr;
-
+
// 1. Get the vtable index for this function
auto it = trait.m_value_indexes.find( pe.item );
while( it != trait.m_value_indexes.end() )
@@ -333,7 +333,7 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
if( it == trait.m_value_indexes.end() || it->first != pe.item )
BUG(sp, "Calling method '" << pe.item << "' from " << pe.trait << " through " << te.m_trait.m_path << " which isn't in the vtable");
unsigned int vtable_idx = it->second.first;
-
+
// 2. Load from the vtable
auto vtable_ty_spath = te.m_trait.m_path.m_path;
vtable_ty_spath.m_components.back() += "#vtable";
@@ -351,15 +351,15 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
::HIR::BorrowType::Shared,
::HIR::TypeRef( ::HIR::GenericPath(vtable_ty_spath, mv$(vtable_params)), &vtable_ref )
);
-
+
// Allocate a temporary for the vtable pointer itself
auto vtable_lv = mutator.new_temporary( mv$(vtable_ty) );
// - Load the vtable and store it
auto vtable_rval = ::MIR::RValue::make_DstMeta({ ::MIR::LValue::make_Deref({ box$(receiver_lvp.clone()) }) });
mutator.push_statement( ::MIR::Statement::make_Assign({ vtable_lv.clone(), mv$(vtable_rval) }) );
-
+
auto fcn_lval = ::MIR::LValue::make_Field({ box$(::MIR::LValue::make_Deref({ box$(vtable_lv) })), vtable_idx });
-
+
::HIR::TypeRef tmp;
const auto& ty = state.get_lvalue_type(tmp, fcn_lval);
const auto& receiver = ty.m_data.as_Function().m_arg_types.at(0);
@@ -368,7 +368,7 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
// TODO: If the receiver is Box, create a Box<()> as the value.
// - Requires de/restructuring the Box same as CoerceUnsized
// - Can use the `coerce_unsized_index` field too
-
+
struct H {
static ::MIR::LValue get_unit_ptr(const ::MIR::TypeResolve& state, MirMutator& mutator, ::HIR::TypeRef ty, ::MIR::LValue lv)
{
@@ -407,7 +407,7 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
}
)
)
-
+
auto new_path = ty_path.clone();
return mutator.in_temporary( mv$(ty), ::MIR::RValue::make_Struct({ mv$(new_path), ~0u, mv$(vals) }) );
}
@@ -424,18 +424,18 @@ const ::HIR::Literal* MIR_Cleanup_GetConstant(const Span& sp, const StaticTraitR
}
}
};
-
+
receiver_lvp = H::get_unit_ptr(state,mutator, receiver.clone(), receiver_lvp.clone());
}
else
{
auto ptr_rval = ::MIR::RValue::make_DstPtr({ ::MIR::LValue::make_Deref({ box$(receiver_lvp.clone()) }) });
-
+
auto ptr_lv = mutator.new_temporary( ::HIR::TypeRef::new_pointer(::HIR::BorrowType::Shared, ::HIR::TypeRef::new_unit()) );
mutator.push_statement( ::MIR::Statement::make_Assign({ ptr_lv.clone(), mv$(ptr_rval) }) );
receiver_lvp = mv$(ptr_lv);
}
-
+
// Update the terminator with the new information.
return fcn_lval;
}
@@ -457,7 +457,7 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
// Source must be Path and Unsize
if( de.binding.is_Opaque() )
return false;
-
+
MIR_ASSERT(state, src_ty.m_data.is_Path(), "Unsize to path from non-path - " << src_ty);
const auto& se = src_ty.m_data.as_Path();
MIR_ASSERT(state, de.binding.tag() == se.binding.tag(), "Unsize between mismatched types - " << dst_ty << " and " << src_ty);
@@ -465,10 +465,10 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
MIR_ASSERT(state, de.binding.as_Struct() == se.binding.as_Struct(), "Unsize between mismatched types - " << dst_ty << " and " << src_ty);
const auto& str = *de.binding.as_Struct();
MIR_ASSERT(state, str.m_markings.unsized_field != ~0u, "Unsize on type that doesn't implement have a ?Sized field - " << dst_ty);
-
+
auto monomorph_cb_d = monomorphise_type_get_cb(state.sp, nullptr, &de.path.m_data.as_Generic().m_params, nullptr);
auto monomorph_cb_s = monomorphise_type_get_cb(state.sp, nullptr, &se.path.m_data.as_Generic().m_params, nullptr);
-
+
// Return GetMetadata on the inner type
TU_MATCHA( (str.m_data), (se),
(Unit,
@@ -478,14 +478,14 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
const auto& ty_tpl = se.at( str.m_markings.unsized_field ).ent;
auto ty_d = monomorphise_type_with(state.sp, ty_tpl, monomorph_cb_d, false);
auto ty_s = monomorphise_type_with(state.sp, ty_tpl, monomorph_cb_s, false);
-
+
return MIR_Cleanup_Unsize_GetMetadata(state, mutator, ty_d, ty_s, ptr_value, out_meta_val,out_meta_ty,out_src_is_dst);
),
(Named,
const auto& ty_tpl = se.at( str.m_markings.unsized_field ).second.ent;
auto ty_d = monomorphise_type_with(state.sp, ty_tpl, monomorph_cb_d, false);
auto ty_s = monomorphise_type_with(state.sp, ty_tpl, monomorph_cb_s, false);
-
+
return MIR_Cleanup_Unsize_GetMetadata(state, mutator, ty_d, ty_s, ptr_value, out_meta_val,out_meta_ty,out_src_is_dst);
)
)
@@ -504,9 +504,9 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
{
// HACK: FixedSizeArray uses `A: Unsize<[T]>` which will lead to the above code not working (as the size isn't known).
// - Maybe _Meta on the `&A` would work as a stopgap (since A: Sized, it won't collide with &[T] or similar)
-
+
return false;
-
+
//out_meta_ty = ::HIR::CoreType::Usize;
//out_meta_val = ::MIR::RValue::make_DstMeta({ ptr_value.clone() });
//return true;
@@ -517,9 +517,9 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
}
),
(TraitObject,
-
+
auto ty_unit_ptr = ::HIR::TypeRef::new_pointer(::HIR::BorrowType::Shared, ::HIR::TypeRef::new_unit());
-
+
// No data trait, vtable is a null unit pointer.
// - Shouldn't the vtable be just unit?
// - Codegen assumes it's a pointer.
@@ -548,9 +548,9 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
vtable_params.m_types[idx] = ty_b.second.clone();
}
auto vtable_type = ::HIR::TypeRef( ::HIR::GenericPath(vtable_ty_spath, mv$(vtable_params)), &vtable_ref );
-
+
out_meta_ty = ::HIR::TypeRef::new_pointer(::HIR::BorrowType::Shared, mv$(vtable_type));
-
+
// If the data trait hasn't changed, return the vtable pointer
if( src_ty.m_data.is_TraitObject() )
{
@@ -572,7 +572,7 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
::MIR::RValue MIR_Cleanup_Unsize(const ::MIR::TypeResolve& state, MirMutator& mutator, const ::HIR::TypeRef& dst_ty, const ::HIR::TypeRef& src_ty_inner, ::MIR::LValue ptr_value)
{
const auto& dst_ty_inner = (dst_ty.m_data.is_Borrow() ? *dst_ty.m_data.as_Borrow().inner : *dst_ty.m_data.as_Pointer().inner);
-
+
::HIR::TypeRef meta_type;
::MIR::RValue meta_value;
bool source_is_dst = false;
@@ -585,7 +585,7 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
auto ty_unit_ptr = ::HIR::TypeRef::new_pointer(::HIR::BorrowType::Shared, ::HIR::TypeRef::new_unit());
auto deref_ptr_val = ::MIR::LValue::make_Deref({ box$(ptr_value) });
auto thin_ptr_lval = mutator.in_temporary( mv$(ty_unit_ptr), ::MIR::RValue::make_DstPtr({ mv$(deref_ptr_val) }) );
-
+
return ::MIR::RValue::make_MakeDst({ mv$(thin_ptr_lval), mv$(meta_lval) });
}
else
@@ -610,7 +610,7 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
MIR_ASSERT(state, src_ty.m_data.is_Path(), "CoerceUnsized to Path must have a Path source - " << src_ty << " to " << dst_ty);
const auto& dte = dst_ty.m_data.as_Path();
const auto& ste = src_ty.m_data.as_Path();
-
+
// - Types must differ only by a single field, and be from the same definition
MIR_ASSERT(state, dte.binding.is_Struct(), "Note, can't CoerceUnsized non-structs");
MIR_ASSERT(state, dte.binding.tag() == ste.binding.tag(),
@@ -620,10 +620,10 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
const auto& str = *dte.binding.as_Struct();
MIR_ASSERT(state, str.m_markings.coerce_unsized_index != ~0u,
"Struct " << src_ty << " doesn't impl CoerceUnsized");
-
+
auto monomorph_cb_d = monomorphise_type_get_cb(state.sp, nullptr, &dte.path.m_data.as_Generic().m_params, nullptr);
auto monomorph_cb_s = monomorphise_type_get_cb(state.sp, nullptr, &ste.path.m_data.as_Generic().m_params, nullptr);
-
+
// - Destructure and restrucure with the unsized fields
::std::vector<::MIR::LValue> ents;
TU_MATCHA( (str.m_data), (se),
@@ -638,19 +638,19 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
{
auto ty_d = monomorphise_type_with(state.sp, se[i].ent, monomorph_cb_d, false);
auto ty_s = monomorphise_type_with(state.sp, se[i].ent, monomorph_cb_s, false);
-
+
auto new_rval = MIR_Cleanup_CoerceUnsized(state, mutator, ty_d, ty_s, ::MIR::LValue::make_Field({ box$(value.clone()), i }));
auto new_lval = mutator.in_temporary( mv$(ty_d), mv$(new_rval) );
-
+
ents.push_back( mv$(new_lval) );
}
else if( state.m_resolve.is_type_phantom_data( se[i].ent ) )
{
auto ty_d = monomorphise_type_with(state.sp, se[i].ent, monomorph_cb_d, false);
-
+
auto new_rval = ::MIR::RValue::make_Cast({ ::MIR::LValue::make_Field({ box$(value.clone()), i }), ty_d.clone() });
auto new_lval = mutator.in_temporary( mv$(ty_d), mv$(new_rval) );
-
+
ents.push_back( mv$(new_lval) );
}
else
@@ -667,20 +667,20 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
{
auto ty_d = monomorphise_type_with(state.sp, se[i].second.ent, monomorph_cb_d, false);
auto ty_s = monomorphise_type_with(state.sp, se[i].second.ent, monomorph_cb_s, false);
-
+
auto new_rval = MIR_Cleanup_CoerceUnsized(state, mutator, ty_d, ty_s, ::MIR::LValue::make_Field({ box$(value.clone()), i }));
auto new_lval = mutator.new_temporary( mv$(ty_d) );
mutator.push_statement( ::MIR::Statement::make_Assign({ new_lval.clone(), mv$(new_rval) }) );
-
+
ents.push_back( mv$(new_lval) );
}
else if( state.m_resolve.is_type_phantom_data( se[i].second.ent ) )
{
auto ty_d = monomorphise_type_with(state.sp, se[i].second.ent, monomorph_cb_d, false);
-
+
auto new_rval = ::MIR::RValue::make_Cast({ ::MIR::LValue::make_Field({ box$(value.clone()), i }), ty_d.clone() });
auto new_lval = mutator.in_temporary( mv$(ty_d), mv$(new_rval) );
-
+
ents.push_back( mv$(new_lval) );
}
else
@@ -692,22 +692,22 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
)
return ::MIR::RValue::make_Struct({ dte.path.m_data.as_Generic().clone(), ~0u, mv$(ents) });
}
-
+
if( dst_ty.m_data.is_Borrow() )
{
MIR_ASSERT(state, src_ty.m_data.is_Borrow(), "CoerceUnsized to Borrow must have a Borrow source - " << src_ty << " to " << dst_ty);
const auto& ste = src_ty.m_data.as_Borrow();
-
+
return MIR_Cleanup_Unsize(state, mutator, dst_ty, *ste.inner, mv$(value));
}
-
+
// Pointer Coercion - Downcast and unsize
if( dst_ty.m_data.is_Pointer() )
{
MIR_ASSERT(state, src_ty.m_data.is_Pointer(), "CoerceUnsized to Pointer must have a Pointer source - " << src_ty << " to " << dst_ty);
const auto& dte = dst_ty.m_data.as_Pointer();
const auto& ste = src_ty.m_data.as_Pointer();
-
+
if( dte.type == ste.type )
{
// TODO: Use unsize code above
@@ -717,11 +717,11 @@ bool MIR_Cleanup_Unsize_GetMetadata(const ::MIR::TypeResolve& state, MirMutator&
{
MIR_ASSERT(state, *dte.inner == *ste.inner, "TODO: Can pointer CoerceUnsized unsize? " << src_ty << " to " << dst_ty);
MIR_ASSERT(state, dte.type < ste.type, "CoerceUnsize attempting to raise pointer type");
-
+
return ::MIR::RValue::make_Cast({ mv$(value), dst_ty.clone() });
}
}
-
+
MIR_BUG(state, "Unknown CoerceUnsized target " << dst_ty << " from " << src_ty);
throw "";
}
@@ -788,7 +788,7 @@ void MIR_Cleanup_LValue(const ::MIR::TypeResolve& state, MirMutator& mutator, ::
)
tmp = monomorphise_type(state.sp, str.m_params, te.path.m_data.as_Generic().m_params, *ty_tpl);
typ = &tmp;
-
+
auto new_lval = ::MIR::LValue::make_Field({ mv$(le.val), 0 });
le.val = box$(new_lval);
}
@@ -803,7 +803,7 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
Span sp;
TRACE_FUNCTION_F(path);
::MIR::TypeResolve state { sp, resolve, FMT_CB(ss, ss << path;), ret_type, args, fcn };
-
+
MirMutator mutator { fcn, 0, 0 };
for(auto& block : fcn.blocks)
{
@@ -811,7 +811,7 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
{
state.set_cur_stmt( mutator.cur_block, mutator.cur_stmt );
auto& stmt = *it;
-
+
// 1. Visit all LValues for box deref hackery
TU_MATCHA( (stmt), (se),
(Drop,
@@ -874,7 +874,7 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
if( stmt.is_Assign() )
{
auto& se = stmt.as_Assign();
-
+
TU_IFLET( ::MIR::RValue, se.src, Constant, e,
// TODO: Replace `Const` with actual values
TU_IFLET( ::MIR::Constant, e, Const, ce,
@@ -887,14 +887,14 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
}
)
)
-
+
if( se.src.is_Borrow() && se.src.as_Borrow().val.is_Field() )
{
auto& e = se.src.as_Borrow();
// TODO: If borrowing a !Sized value via a field access, create the DST
::HIR::TypeRef tmp;
const auto& src_ty = state.get_lvalue_type(tmp, e.val);
-
+
if( !resolve.type_is_sized(sp, src_ty) && !src_ty.m_data.is_Generic() )
{
auto ty_unit_ptr = ::HIR::TypeRef::new_borrow( e.type, ::HIR::TypeRef::new_unit() );
@@ -909,24 +909,24 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
while( lv->is_Field() )
lv = &*lv->as_Field().val;
MIR_ASSERT(state, lv->is_Deref(), "Access of !Sized field not via a deref");
-
+
const auto& dst_val_lval = *lv;
-
+
auto meta_rval = ::MIR::RValue::make_DstMeta({ dst_val_lval.clone() });
// TODO: How can the thin pointer to the field be obtained without tripping this twice?
auto ptr_rval = mv$( se.src );
-
+
// TODO: Get the metadata type.
auto meta_ty = ::HIR::TypeRef( ::HIR::CoreType::Usize );
auto meta_lval = mutator.in_temporary( mv$(meta_ty), mv$(meta_rval) );
-
+
// HACK: Store the pointer as &()
auto ptr_lval = mutator.in_temporary( mv$(ty_unit_ptr), mv$(ptr_rval) );
se.src = ::MIR::RValue::make_MakeDst({ mv$(ptr_lval), mv$(meta_lval) });
}
}
}
-
+
if( se.src.is_Cast() )
{
auto& e = se.src.as_Cast();
@@ -943,7 +943,7 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
// Casts to PhantomData are only valid from PhandomData, and are added by _CoerceUnsized
else if( state.m_resolve.is_type_phantom_data(e.type) )
{
- // Leave
+ // Leave
MIR_ASSERT(state, state.m_resolve.is_type_phantom_data(src_ty) != nullptr, "PhandomData can only cast from PhantomData");
}
// - CoerceUnsized should re-create the inner type if known.
@@ -966,15 +966,15 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
}
}
}
-
+
DEBUG(it - block.statements.begin());
it = mutator.flush();
DEBUG(it - block.statements.begin());
mutator.cur_stmt += 1;
}
-
+
state.set_cur_stmt_term( mutator.cur_block );
-
+
TU_MATCHA( (block.terminator), (e),
(Incomplete,
),
@@ -1001,9 +1001,9 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
MIR_Cleanup_LValue(state, mutator, lv);
)
)
-
+
TU_IFLET( ::MIR::Terminator, block.terminator, Call, e,
-
+
TU_IFLET( ::MIR::CallTarget, e.fcn, Path, path,
// Detect calling `<Trait as Trait>::method()` and replace with vtable call
if( path.m_data.is_UfcsKnown() && path.m_data.as_UfcsKnown().type->m_data.is_TraitObject() )
@@ -1024,7 +1024,7 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
e.fcn = mv$(tgt_lvalue);
}
}
-
+
if( path.m_data.is_UfcsKnown() && path.m_data.as_UfcsKnown().type->m_data.is_Function() )
{
const auto& pe = path.m_data.as_UfcsKnown();
@@ -1034,9 +1034,9 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
MIR_ASSERT(state, e.args.size() == 2, "Fn* call requires two arguments");
auto fcn_lvalue = mv$(e.args[0]);
auto args_lvalue = mv$(e.args[1]);
-
+
DEBUG("Convert function pointer call");
-
+
e.args.clear();
e.args.reserve( fcn_ty.m_arg_types.size() );
for(unsigned int i = 0; i < fcn_ty.m_arg_types.size(); i ++)
@@ -1048,7 +1048,7 @@ void MIR_Cleanup(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path,
}
)
)
-
+
mutator.flush();
mutator.cur_block += 1;
mutator.cur_stmt = 0;
diff --git a/src/mir/dump.cpp b/src/mir/dump.cpp
index d1fdc503..555093f2 100644
--- a/src/mir/dump.cpp
+++ b/src/mir/dump.cpp
@@ -10,14 +10,14 @@
#include "mir.hpp"
namespace {
-
+
class TreeVisitor:
public ::HIR::Visitor
{
::std::ostream& m_os;
unsigned int m_indent_level;
bool m_short_item_name = false;
-
+
public:
TreeVisitor(::std::ostream& os):
m_os(os),
@@ -28,7 +28,7 @@ namespace {
void visit_type_impl(::HIR::TypeImpl& impl) override
{
m_short_item_name = true;
-
+
m_os << indent() << "impl" << impl.m_params.fmt_args() << " " << impl.m_type << "\n";
if( ! impl.m_params.m_bounds.empty() )
{
@@ -39,13 +39,13 @@ namespace {
::HIR::Visitor::visit_type_impl(impl);
dec_indent();
m_os << indent() << "}\n";
-
+
m_short_item_name = false;
}
virtual void visit_trait_impl(const ::HIR::SimplePath& trait_path, ::HIR::TraitImpl& impl) override
{
m_short_item_name = true;
-
+
m_os << indent() << "impl" << impl.m_params.fmt_args() << " " << trait_path << impl.m_trait_args << " for " << impl.m_type << "\n";
if( ! impl.m_params.m_bounds.empty() )
{
@@ -56,28 +56,28 @@ namespace {
::HIR::Visitor::visit_trait_impl(trait_path, impl);
dec_indent();
m_os << indent() << "}\n";
-
+
m_short_item_name = false;
}
void visit_marker_impl(const ::HIR::SimplePath& trait_path, ::HIR::MarkerImpl& impl) override
{
m_short_item_name = true;
-
+
m_os << indent() << "impl" << impl.m_params.fmt_args() << " " << (impl.is_positive ? "" : "!") << trait_path << impl.m_trait_args << " for " << impl.m_type << "\n";
if( ! impl.m_params.m_bounds.empty() )
{
m_os << indent() << " " << impl.m_params.fmt_bounds() << "\n";
}
m_os << indent() << "{ }\n";
-
+
m_short_item_name = false;
}
-
+
// - Type Items
void visit_trait(::HIR::ItemPath p, ::HIR::Trait& item) override
{
m_short_item_name = true;
-
+
m_os << indent() << "trait " << p << item.m_params.fmt_args() << "\n";
if( ! item.m_params.m_bounds.empty() )
{
@@ -88,7 +88,7 @@ namespace {
::HIR::Visitor::visit_trait(p, item);
dec_indent();
m_os << indent() << "}\n";
-
+
m_short_item_name = false;
}
@@ -119,7 +119,7 @@ namespace {
{
m_os << indent() << " " << item.m_params.fmt_bounds() << "\n";
}
-
+
if( item.m_code )
{
m_os << indent() << "{\n";
@@ -133,8 +133,8 @@ namespace {
m_os << indent() << " ;\n";
}
}
-
-
+
+
void dump_mir(const ::MIR::Function& fcn)
{
for(unsigned int i = 0; i < fcn.named_variables.size(); i ++)
@@ -145,19 +145,19 @@ namespace {
{
m_os << indent() << "let tmp$" << i << ": " << fcn.temporaries[i] << ";\n";
}
-
+
#define FMT_M(x) FMT_CB(os, this->fmt_val(os,x);)
for(unsigned int i = 0; i < fcn.blocks.size(); i ++)
{
const auto& block = fcn.blocks[i];
DEBUG("BB" << i);
-
+
m_os << indent() << "bb" << i << ": {\n";
inc_indent();
for(const auto& stmt : block.statements)
{
m_os << indent();
-
+
TU_MATCHA( (stmt), (e),
(Assign,
DEBUG("- Assign " << e.dst << " = " << e.src);
@@ -169,7 +169,7 @@ namespace {
)
)
}
-
+
m_os << indent();
TU_MATCHA( (block.terminator), (e),
(Incomplete,
@@ -217,7 +217,7 @@ namespace {
)
dec_indent();
m_os << indent() << "}\n";
-
+
m_os.flush();
}
#undef FMT
@@ -329,14 +329,14 @@ namespace {
case ::MIR::eBinOp::MUL_OV: os << "MUL_OV"; break;
case ::MIR::eBinOp::DIV_OV: os << "DIV_OV"; break;
//case ::MIR::eBinOp::MOD_OV: os << "MOD_OV"; break;
-
+
case ::MIR::eBinOp::BIT_OR : os << "BIT_OR"; break;
case ::MIR::eBinOp::BIT_AND: os << "BIT_AND"; break;
case ::MIR::eBinOp::BIT_XOR: os << "BIT_XOR"; break;
-
+
case ::MIR::eBinOp::BIT_SHR: os << "BIT_SHR"; break;
case ::MIR::eBinOp::BIT_SHL: os << "BIT_SHL"; break;
-
+
case ::MIR::eBinOp::EQ: os << "EQ"; break;
case ::MIR::eBinOp::NE: os << "NE"; break;
case ::MIR::eBinOp::GT: os << "GT"; break;
@@ -424,7 +424,7 @@ namespace {
void MIR_Dump(::std::ostream& sink, const ::HIR::Crate& crate)
{
TreeVisitor tv { sink };
-
+
tv.visit_crate( const_cast< ::HIR::Crate&>(crate) );
}
diff --git a/src/mir/from_hir.cpp b/src/mir/from_hir.cpp
index 7218563b..e21ca993 100644
--- a/src/mir/from_hir.cpp
+++ b/src/mir/from_hir.cpp
@@ -20,14 +20,14 @@
namespace {
-
+
class ExprVisitor_Conv:
public MirConverter
{
MirBuilder& m_builder;
-
+
const ::std::vector< ::HIR::TypeRef>& m_variable_types;
-
+
struct LoopDesc {
ScopeHandle scope;
::std::string label;
@@ -35,26 +35,26 @@ namespace {
unsigned int next;
};
::std::vector<LoopDesc> m_loop_stack;
-
+
public:
ExprVisitor_Conv(MirBuilder& builder, const ::std::vector< ::HIR::TypeRef>& var_types):
m_builder(builder),
m_variable_types(var_types)
{
}
-
+
void destructure_from(const Span& sp, const ::HIR::Pattern& pat, ::MIR::LValue lval, bool allow_refutable=false) override
{
destructure_from_ex(sp, pat, mv$(lval), (allow_refutable ? 1 : 0));
}
-
+
// Brings variables defined in `pat` into scope
void define_vars_from(const Span& sp, const ::HIR::Pattern& pat) override
{
if( pat.m_binding.is_valid() ) {
m_builder.define_variable( pat.m_binding.m_slot );
}
-
+
TU_MATCHA( (pat.m_data), (e),
(Any,
),
@@ -98,7 +98,7 @@ namespace {
),
(EnumValue,
),
-
+
(EnumTuple,
for(unsigned int i = 0; i < e.sub_patterns.size(); i ++ )
{
@@ -132,7 +132,7 @@ namespace {
)
)
}
-
+
void destructure_from_ex(const Span& sp, const ::HIR::Pattern& pat, ::MIR::LValue lval, int allow_refutable=0) // 1 : yes, 2 : disallow binding
{
if( allow_refutable != 3 && pat.m_binding.is_valid() ) {
@@ -146,7 +146,7 @@ namespace {
// Refutable and binding allowed
destructure_from_ex(sp, pat, lval.clone(), 3);
}
-
+
switch( pat.m_binding.m_type )
{
case ::HIR::PatternBinding::Type::Move:
@@ -168,7 +168,7 @@ namespace {
if( allow_refutable == 3 ) {
allow_refutable = 2;
}
-
+
TU_MATCHA( (pat.m_data), (e),
(Any,
),
@@ -267,7 +267,7 @@ namespace {
else
{
ASSERT_BUG(sp, allow_refutable, "Refutable pattern not expected - " << pat);
-
+
// TODO: Emit code to triple-check the size? Or just assume that match did that correctly.
for(unsigned int i = 0; i < e.sub_patterns.size(); i ++)
{
@@ -320,7 +320,7 @@ namespace {
)
)
}
-
+
// -- ExprVisitor
void visit(::HIR::ExprNode_Block& node) override
{
@@ -329,38 +329,38 @@ namespace {
if( node.m_nodes.size() > 0 )
{
bool diverged = false;
-
+
auto scope = m_builder.new_scope_var(node.span());
-
+
for(unsigned int i = 0; i < node.m_nodes.size() - (node.m_yields_final ? 1 : 0); i ++)
{
auto& subnode = node.m_nodes[i];
const Span& sp = subnode->span();
-
+
auto stmt_scope = m_builder.new_scope_temp(sp);
this->visit_node_ptr(subnode);
if( m_builder.has_result() ) {
// TODO: Drop.
m_builder.get_result(sp);
}
-
+
if( m_builder.block_active() ) {
m_builder.terminate_scope(sp, mv$(stmt_scope));
}
else {
auto _ = mv$(stmt_scope);
-
+
m_builder.set_cur_block( m_builder.new_bb_unlinked() );
diverged = true;
}
}
-
+
// - For the last node, don't bother with a statement scope
if( node.m_yields_final )
{
auto& subnode = node.m_nodes.back();
const Span& sp = subnode->span();
-
+
auto stmt_scope = m_builder.new_scope_temp(sp);
this->visit_node_ptr(subnode);
if( m_builder.has_result() || m_builder.block_active() )
@@ -368,10 +368,10 @@ namespace {
ASSERT_BUG(sp, m_builder.block_active(), "Result yielded, but no active block");
ASSERT_BUG(sp, m_builder.has_result(), "Active block but no result yeilded");
// PROBLEM: This can drop the result before we want to use it.
-
+
auto res = m_builder.get_result(sp);
m_builder.raise_variables(sp, res);
-
+
m_builder.terminate_scope(sp, mv$(stmt_scope));
m_builder.terminate_scope( node.span(), mv$(scope) );
m_builder.set_result( node.span(), mv$(res) );
@@ -407,7 +407,7 @@ namespace {
{
TRACE_FUNCTION_F("_Return");
this->visit_node_ptr(node.m_value);
-
+
m_builder.push_stmt_assign( node.span(), ::MIR::LValue::make_Return({}), m_builder.get_result(node.span()) );
m_builder.terminate_scope_early( node.span(), m_builder.fcn_scope() );
m_builder.end_block( ::MIR::Terminator::make_Return({}) );
@@ -419,11 +419,11 @@ namespace {
if( node.m_value )
{
this->visit_node_ptr(node.m_value);
-
+
if( ! m_builder.block_active() ) {
return ;
}
-
+
if( node.m_pattern.m_binding.is_valid() && node.m_pattern.m_data.is_Any() && node.m_pattern.m_binding.m_type == ::HIR::PatternBinding::Type::Move )
{
m_builder.push_stmt_assign( node.span(), ::MIR::LValue::make_Variable(node.m_pattern.m_binding.m_slot), m_builder.get_result(node.span()) );
@@ -441,12 +441,12 @@ namespace {
auto loop_body_scope = m_builder.new_scope_loop(node.span());
auto loop_block = m_builder.new_bb_linked();
auto loop_next = m_builder.new_bb_unlinked();
-
+
m_loop_stack.push_back( LoopDesc { mv$(loop_body_scope), node.m_label, loop_block, loop_next } );
this->visit_node_ptr(node.m_code);
auto loop_scope = mv$(m_loop_stack.back().scope);
m_loop_stack.pop_back();
-
+
// If there's a stray result, drop it
if( m_builder.has_result() ) {
assert( m_builder.block_active() );
@@ -467,7 +467,7 @@ namespace {
// Terminate scope without emitting cleanup (cleanup was handled by `break`)
m_builder.terminate_scope( node.span(), mv$(loop_scope), false );
}
-
+
if( ! node.m_diverges )
{
DEBUG("- Doesn't diverge");
@@ -478,7 +478,7 @@ namespace {
{
DEBUG("- Diverges");
assert( !m_builder.has_result() );
-
+
m_builder.set_cur_block(loop_next);
m_builder.end_split_arm_early(node.span());
assert( !m_builder.has_result() );
@@ -491,7 +491,7 @@ namespace {
if( m_loop_stack.size() == 0 ) {
BUG(node.span(), "Loop control outside of a loop");
}
-
+
const auto* target_block = &m_loop_stack.back();
if( node.m_label != "" ) {
auto it = ::std::find_if(m_loop_stack.rbegin(), m_loop_stack.rend(), [&](const auto& x){ return x.label == node.m_label; });
@@ -500,7 +500,7 @@ namespace {
}
target_block = &*it;
}
-
+
// TODO: Insert drop of all active scopes within the loop
m_builder.terminate_scope_early( node.span(), target_block->scope );
if( node.m_continue ) {
@@ -510,13 +510,13 @@ namespace {
m_builder.end_block( ::MIR::Terminator::make_Goto(target_block->next) );
}
}
-
+
void visit(::HIR::ExprNode_Match& node) override
{
TRACE_FUNCTION_FR("_Match", "_Match");
this->visit_node_ptr(node.m_value);
auto match_val = m_builder.get_result_in_lvalue(node.m_value->span(), node.m_value->m_res_type);
-
+
if( node.m_arms.size() == 0 ) {
// Nothing
//const auto& ty = node.m_value->m_res_type;
@@ -531,21 +531,21 @@ namespace {
// - Shortcut: Single-arm match
auto& arm = node.m_arms[0];
const auto& pat = arm.m_patterns[0];
-
+
auto scope = m_builder.new_scope_var(arm.m_code->span());
auto tmp_scope = m_builder.new_scope_temp(arm.m_code->span());
this->define_vars_from(node.span(), pat);
// TODO: Do the same shortcut as _Let?
this->destructure_from(node.span(), pat, mv$(match_val));
-
+
// Temp scope.
this->visit_node_ptr(arm.m_code);
-
+
if( m_builder.block_active() ) {
auto res = m_builder.get_result(arm.m_code->span());
m_builder.raise_variables( arm.m_code->span(), res );
m_builder.set_result(arm.m_code->span(), mv$(res));
-
+
m_builder.terminate_scope( node.span(), mv$(tmp_scope) );
m_builder.terminate_scope( node.span(), mv$(scope) );
}
@@ -558,11 +558,11 @@ namespace {
MIR_LowerHIR_Match(m_builder, *this, node, mv$(match_val));
}
} // ExprNode_Match
-
+
void visit(::HIR::ExprNode_If& node) override
{
TRACE_FUNCTION_FR("_If", "_If");
-
+
bool reverse = false;
{
auto* cond_p = &node.m_cond;
@@ -572,11 +572,11 @@ namespace {
cond_p = &cond_uni->m_value;
reverse = !reverse;
}
-
+
this->visit_node_ptr(*cond_p);
}
auto decision_val = m_builder.get_result_in_lvalue(node.m_cond->span(), node.m_cond->m_res_type);
-
+
auto true_branch = m_builder.new_bb_unlinked();
auto false_branch = m_builder.new_bb_unlinked();
auto next_block = m_builder.new_bb_unlinked();
@@ -586,12 +586,12 @@ namespace {
else {
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(decision_val), true_branch, false_branch }) );
}
-
+
auto result_val = m_builder.new_temporary(node.m_res_type);
-
+
// Scope handles cases where one arm moves a value but the other doesn't
auto scope = m_builder.new_scope_split( node.m_true->span() );
-
+
// 'true' branch
{
m_builder.set_cur_block(true_branch);
@@ -605,7 +605,7 @@ namespace {
m_builder.end_split_arm(node.span(), scope, false);
}
}
-
+
// 'false' branch
m_builder.set_cur_block(false_branch);
if( node.m_false )
@@ -630,10 +630,10 @@ namespace {
}
m_builder.set_cur_block(next_block);
m_builder.terminate_scope( node.span(), mv$(scope) );
-
+
m_builder.set_result( node.span(), mv$(result_val) );
}
-
+
void generate_checked_binop(const Span& sp, ::MIR::LValue res_slot, ::MIR::eBinOp op, ::MIR::LValue val_l, const ::HIR::TypeRef& ty_l, ::MIR::LValue val_r, const ::HIR::TypeRef& ty_r)
{
switch(op)
@@ -726,29 +726,29 @@ namespace {
break;
}
}
-
+
void visit(::HIR::ExprNode_Assign& node) override
{
TRACE_FUNCTION_F("_Assign");
const auto& sp = node.span();
-
+
this->visit_node_ptr(node.m_value);
::MIR::RValue val = m_builder.get_result(sp);
-
+
this->visit_node_ptr(node.m_slot);
auto dst = m_builder.get_result_unwrap_lvalue(sp);
-
+
const auto& ty_slot = node.m_slot->m_res_type;
const auto& ty_val = node.m_value->m_res_type;
-
+
if( node.m_op != ::HIR::ExprNode_Assign::Op::None )
{
auto dst_clone = dst.clone();
auto val_lv = m_builder.lvalue_or_temp( node.span(), ty_val, mv$(val) );
-
+
ASSERT_BUG(sp, ty_slot.m_data.is_Primitive(), "Assignment operator overloads are only valid on primitives - ty_slot="<<ty_slot);
ASSERT_BUG(sp, ty_val.m_data.is_Primitive(), "Assignment operator overloads are only valid on primitives - ty_val="<<ty_val);
-
+
#define _(v) ::HIR::ExprNode_Assign::Op::v
::MIR::eBinOp op;
switch(node.m_op)
@@ -783,20 +783,20 @@ namespace {
}
m_builder.set_result(node.span(), ::MIR::RValue::make_Tuple({}));
}
-
+
void visit(::HIR::ExprNode_BinOp& node) override
{
const auto& sp = node.span();
TRACE_FUNCTION_F("_BinOp");
-
+
const auto& ty_l = node.m_left->m_res_type;
this->visit_node_ptr(node.m_left);
auto left = m_builder.get_result_in_lvalue(node.m_left->span(), ty_l);
-
+
const auto& ty_r = node.m_right->m_res_type;
this->visit_node_ptr(node.m_right);
auto right = m_builder.get_result_in_lvalue(node.m_right->span(), ty_r);
-
+
auto res = m_builder.new_temporary(node.m_res_type);
::MIR::eBinOp op;
switch(node.m_op)
@@ -809,18 +809,18 @@ namespace {
case ::HIR::ExprNode_BinOp::Op::CmpGtE: op = ::MIR::eBinOp::GE;
this->generate_checked_binop(sp, res.clone(), op, mv$(left), ty_l, mv$(right), ty_r);
break;
-
+
case ::HIR::ExprNode_BinOp::Op::Xor: op = ::MIR::eBinOp::BIT_XOR; if(0)
case ::HIR::ExprNode_BinOp::Op::Or : op = ::MIR::eBinOp::BIT_OR ; if(0)
case ::HIR::ExprNode_BinOp::Op::And: op = ::MIR::eBinOp::BIT_AND;
this->generate_checked_binop(sp, res.clone(), op, mv$(left), ty_l, mv$(right), ty_r);
break;
-
+
case ::HIR::ExprNode_BinOp::Op::Shr: op = ::MIR::eBinOp::BIT_SHR; if(0)
case ::HIR::ExprNode_BinOp::Op::Shl: op = ::MIR::eBinOp::BIT_SHL;
this->generate_checked_binop(sp, res.clone(), op, mv$(left), ty_l, mv$(right), ty_r);
break;
-
+
case ::HIR::ExprNode_BinOp::Op::Add: op = ::MIR::eBinOp::ADD; if(0)
case ::HIR::ExprNode_BinOp::Op::Sub: op = ::MIR::eBinOp::SUB; if(0)
case ::HIR::ExprNode_BinOp::Op::Mul: op = ::MIR::eBinOp::MUL; if(0)
@@ -828,7 +828,7 @@ namespace {
case ::HIR::ExprNode_BinOp::Op::Mod: op = ::MIR::eBinOp::MOD;
this->generate_checked_binop(sp, res.clone(), op, mv$(left), ty_l, mv$(right), ty_r);
break;
-
+
case ::HIR::ExprNode_BinOp::Op::BoolAnd: {
auto bb_next = m_builder.new_bb_unlinked();
auto bb_true = m_builder.new_bb_unlinked();
@@ -838,12 +838,12 @@ namespace {
m_builder.set_cur_block( bb_false );
m_builder.push_stmt_assign(node.span(), res.clone(), ::MIR::RValue( ::MIR::Constant::make_Bool(false) ));
m_builder.end_block( ::MIR::Terminator::make_Goto(bb_next) );
-
+
// If left is true, assign result to right
m_builder.set_cur_block( bb_true );
m_builder.push_stmt_assign(node.span(), res.clone(), mv$(right)); // TODO: Right doens't need to be an LValue here.
m_builder.end_block( ::MIR::Terminator::make_Goto(bb_next) );
-
+
m_builder.set_cur_block( bb_next );
} break;
case ::HIR::ExprNode_BinOp::Op::BoolOr: {
@@ -855,26 +855,26 @@ namespace {
m_builder.set_cur_block( bb_true );
m_builder.push_stmt_assign(node.span(), res.clone(), ::MIR::RValue( ::MIR::Constant::make_Bool(true) ));
m_builder.end_block( ::MIR::Terminator::make_Goto(bb_next) );
-
+
// If left is false, assign result to right
m_builder.set_cur_block( bb_false );
m_builder.push_stmt_assign(node.span(), res.clone(), mv$(right)); // TODO: Right doens't need to be an LValue here.
m_builder.end_block( ::MIR::Terminator::make_Goto(bb_next) );
-
+
m_builder.set_cur_block( bb_next );
} break;
}
m_builder.set_result( node.span(), mv$(res) );
}
-
+
void visit(::HIR::ExprNode_UniOp& node) override
{
TRACE_FUNCTION_F("_UniOp");
-
+
const auto& ty_val = node.m_value->m_res_type;
this->visit_node_ptr(node.m_value);
auto val = m_builder.get_result_in_lvalue(node.m_value->span(), ty_val);
-
+
::MIR::RValue res;
switch(node.m_op)
{
@@ -928,11 +928,11 @@ namespace {
void visit(::HIR::ExprNode_Borrow& node) override
{
TRACE_FUNCTION_F("_Borrow");
-
+
const auto& ty_val = node.m_value->m_res_type;
this->visit_node_ptr(node.m_value);
auto val = m_builder.get_result_in_lvalue(node.m_value->span(), ty_val);
-
+
auto res = m_builder.new_temporary(node.m_res_type);
m_builder.push_stmt_assign( node.span(), res.as_Temporary(), ::MIR::RValue::make_Borrow({ 0, node.m_type, mv$(val) }));
m_builder.set_result( node.span(), mv$(res) );
@@ -941,16 +941,16 @@ namespace {
{
TRACE_FUNCTION_F("_Cast");
this->visit_node_ptr(node.m_value);
-
+
const auto& ty_out = node.m_res_type;
const auto& ty_in = node.m_value->m_res_type;
-
+
if( ty_out == ty_in ) {
return ;
}
-
+
auto val = m_builder.get_result_in_lvalue(node.m_value->span(), node.m_value->m_res_type);
-
+
TU_MATCH_DEF( ::HIR::TypeRef::Data, (ty_out.m_data), (de),
(
BUG(node.span(), "Invalid cast to " << ty_out << " from " << ty_in);
@@ -1067,16 +1067,16 @@ namespace {
{
TRACE_FUNCTION_F("_Unsize");
this->visit_node_ptr(node.m_value);
-
+
const auto& ty_out = node.m_res_type;
const auto& ty_in = node.m_value->m_res_type;
-
+
if( ty_out == ty_in ) {
return ;
}
-
+
auto ptr_lval = m_builder.get_result_in_lvalue(node.m_value->span(), node.m_value->m_res_type);
-
+
if( ty_out.m_data.is_Borrow() && ty_in.m_data.is_Borrow() )
{
const auto& oe = ty_out.m_data.as_Borrow();
@@ -1135,14 +1135,14 @@ namespace {
vtable_params.m_types[idx] = ty_b.second.clone();
}
auto vtable_type = ::HIR::TypeRef( ::HIR::GenericPath(vtable_ty_spath, mv$(vtable_params)), &vtable_ref );
-
+
::HIR::Path vtable { ty_in.clone(), e.m_trait.m_path.clone(), "#vtable" };
auto vtable_lval = m_builder.lvalue_or_temp(
node.span(),
::HIR::TypeRef::new_pointer(::HIR::BorrowType::Shared, mv$(vtable_type)),
::MIR::RValue( ::MIR::Constant::make_ItemAddr(mv$(vtable)) )
);
-
+
m_builder.set_result( node.span(), ::MIR::RValue::make_MakeDst({ mv$(ptr_lval), mv$(vtable_lval) }) );
#else
m_builder.set_result( node.span(), ::MIR::RValue::make_Cast({ mv$(ptr_lval), node.m_res_type.clone() }) );
@@ -1164,16 +1164,16 @@ namespace {
void visit(::HIR::ExprNode_Index& node) override
{
TRACE_FUNCTION_F("_Index");
-
+
// NOTE: Calculate the index first (so if it borrows from the source, it's over by the time that's needed)
const auto& ty_idx = node.m_index->m_res_type;
this->visit_node_ptr(node.m_index);
auto index = m_builder.get_result_in_lvalue(node.m_index->span(), ty_idx);
-
+
const auto& ty_val = node.m_value->m_res_type;
this->visit_node_ptr(node.m_value);
auto value = m_builder.get_result_in_lvalue(node.m_value->span(), ty_val);
-
+
::MIR::RValue limit_val;
TU_MATCH_DEF(::HIR::TypeRef::Data, (ty_val.m_data), (e),
(
@@ -1186,7 +1186,7 @@ namespace {
limit_val = ::MIR::RValue::make_DstMeta({ value.clone() });
)
)
-
+
TU_MATCH_DEF(::HIR::TypeRef::Data, (ty_idx.m_data), (e),
(
BUG(node.span(), "Indexing using unsupported index type " << ty_idx);
@@ -1197,38 +1197,38 @@ namespace {
}
)
)
-
+
// Range checking (DISABLED)
if( false )
{
auto limit_lval = m_builder.lvalue_or_temp( node.span(), ty_idx, mv$(limit_val) );
-
+
auto cmp_res = m_builder.new_temporary( ::HIR::CoreType::Bool );
m_builder.push_stmt_assign(node.span(), cmp_res.clone(), ::MIR::RValue::make_BinOp({ index.clone(), ::MIR::eBinOp::GE, mv$(limit_lval) }));
auto arm_panic = m_builder.new_bb_unlinked();
auto arm_continue = m_builder.new_bb_unlinked();
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_res), arm_panic, arm_continue }) );
-
+
m_builder.set_cur_block( arm_panic );
// TODO: Call an "index fail" method which always panics.
//m_builder.end_block( ::MIR::Terminator::make_Panic({}) );
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
-
+
m_builder.set_cur_block( arm_continue );
}
-
+
m_builder.set_result( node.span(), ::MIR::LValue::make_Index({ box$(value), box$(index) }) );
}
-
+
void visit(::HIR::ExprNode_Deref& node) override
{
const Span& sp = node.span();
TRACE_FUNCTION_F("_Deref");
-
+
const auto& ty_val = node.m_value->m_res_type;
this->visit_node_ptr(node.m_value);
auto val = m_builder.get_result_in_lvalue(node.m_value->span(), ty_val);
-
+
TU_MATCH_DEF( ::HIR::TypeRef::Data, (ty_val.m_data), (te),
(
if( m_builder.is_type_owned_box( ty_val ) )
@@ -1246,10 +1246,10 @@ namespace {
// Deref on a borrow - Always valid... assuming borrowck is there :)
)
)
-
+
m_builder.set_result( node.span(), ::MIR::LValue::make_Deref({ box$(val) }) );
}
-
+
void visit(::HIR::ExprNode_Emplace& node) override
{
if( node.m_type == ::HIR::ExprNode_Emplace::Type::Noop ) {
@@ -1260,9 +1260,9 @@ namespace {
auto path_Place = ::HIR::SimplePath("core", {"ops", "Place"});
auto path_Boxed = ::HIR::SimplePath("core", {"ops", "Boxed"});
//auto path_InPlace = ::HIR::SimplePath("core", {"ops", "InPlace"});
-
+
const auto& data_ty = node.m_value->m_res_type;
-
+
// 1. Obtain the type of the `place` variable
::HIR::TypeRef place_type;
switch( node.m_type )
@@ -1277,7 +1277,7 @@ namespace {
TODO(node.span(), "_Emplace - Placer");
break;
}
-
+
// 2. Initialise the place
auto place = m_builder.new_temporary( place_type );
auto place__panic = m_builder.new_bb_unlinked();
@@ -1299,13 +1299,13 @@ namespace {
TODO(node.span(), "_Emplace - Placer");
break;
}
-
+
// TODO: Proper panic handling, including scope destruction
m_builder.set_cur_block(place__panic);
// TODO: Drop `place`
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
m_builder.set_cur_block(place__ok);
-
+
// 2. Get `place_raw`
auto place_raw__type = ::HIR::TypeRef::new_pointer(::HIR::BorrowType::Unique, node.m_value->m_res_type.clone());
auto place_raw = m_builder.new_temporary( place_raw__type );
@@ -1322,19 +1322,19 @@ namespace {
::make_vec1( mv$(place_refmut) )
}));
}
-
+
// TODO: Proper panic handling, including scope destruction
m_builder.set_cur_block(place_raw__panic);
// TODO: Drop `place`
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
m_builder.set_cur_block(place_raw__ok);
-
-
+
+
// 3. Get the value and assign it into `place_raw`
node.m_value->visit(*this);
auto val = m_builder.get_result(node.span());
m_builder.push_stmt_assign( node.span(), ::MIR::LValue::make_Deref({ box$(place_raw.clone()) }), mv$(val) );
-
+
// 3. Return a call to `finalize`
::HIR::Path finalize_path(::HIR::GenericPath {});
switch( node.m_type )
@@ -1348,7 +1348,7 @@ namespace {
TODO(node.span(), "_Emplace - Placer");
break;
}
-
+
auto res = m_builder.new_temporary( node.m_res_type );
auto res__panic = m_builder.new_bb_unlinked();
auto res__ok = m_builder.new_bb_unlinked();
@@ -1357,17 +1357,17 @@ namespace {
res.clone(), mv$(finalize_path),
::make_vec1( mv$(place) )
}));
-
+
// TODO: Proper panic handling, including scope destruction
m_builder.set_cur_block(res__panic);
// TODO: Should this drop the value written to the rawptr?
// - No, becuase it's likely invalid now. Goodbye!
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
m_builder.set_cur_block(res__ok);
-
+
m_builder.set_result( node.span(), mv$(res) );
}
-
+
void visit(::HIR::ExprNode_TupleVariant& node) override
{
const Span& sp = node.span();
@@ -1379,7 +1379,7 @@ namespace {
this->visit_node_ptr(arg);
values.push_back( m_builder.get_result_in_lvalue(arg->span(), arg->m_res_type) );
}
-
+
unsigned int variant_index = ~0u;
if( !node.m_is_struct )
{
@@ -1390,17 +1390,17 @@ namespace {
const auto& enm = m_builder.crate().get_enum_by_path(sp, enum_path);
auto var_it = ::std::find_if(enm.m_variants.begin(), enm.m_variants.end(), [&](const auto& x){ return x.first == var_name; });
ASSERT_BUG(sp, var_it != enm.m_variants.end(), "Variant " << node.m_path.m_path << " isn't present");
-
+
variant_index = var_it - enm.m_variants.begin();
}
-
+
m_builder.set_result( node.span(), ::MIR::RValue::make_Struct({
node.m_path.clone(),
variant_index,
mv$(values)
}) );
}
-
+
void visit(::HIR::ExprNode_CallPath& node) override
{
TRACE_FUNCTION_F("_CallPath " << node.m_path);
@@ -1412,14 +1412,14 @@ namespace {
values.push_back( m_builder.get_result_in_lvalue(arg->span(), arg->m_res_type) );
m_builder.moved_lvalue( arg->span(), values.back() );
}
-
-
+
+
auto panic_block = m_builder.new_bb_unlinked();
auto next_block = m_builder.new_bb_unlinked();
auto res = m_builder.new_temporary( node.m_res_type );
-
+
bool unconditional_diverge = false;
-
+
// Emit intrinsics as a special call type
if( node.m_path.m_data.is_Generic() )
{
@@ -1433,13 +1433,13 @@ namespace {
mv$(values)
}));
}
-
+
if( fcn.m_return.m_data.is_Diverge() )
{
unconditional_diverge = true;
}
}
-
+
// If the call wasn't to an intrinsic, emit it as a path
if( m_builder.block_active() )
{
@@ -1449,11 +1449,11 @@ namespace {
mv$(values)
}));
}
-
+
m_builder.set_cur_block(panic_block);
// TODO: Proper panic handling, including scope destruction
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
-
+
m_builder.set_cur_block( next_block );
// If the function doesn't return, early-terminate the return block.
@@ -1469,16 +1469,16 @@ namespace {
}
m_builder.set_result( node.span(), mv$(res) );
}
-
+
void visit(::HIR::ExprNode_CallValue& node) override
{
TRACE_FUNCTION_F("_CallValue " << node.m_value->m_res_type);
-
+
// _CallValue is ONLY valid on function pointers (all others must be desugared)
ASSERT_BUG(node.span(), node.m_value->m_res_type.m_data.is_Function(), "Leftover _CallValue on a non-fn()");
this->visit_node_ptr(node.m_value);
auto fcn_val = m_builder.get_result_in_lvalue( node.m_value->span(), node.m_value->m_res_type );
-
+
::std::vector< ::MIR::LValue> values;
values.reserve( node.m_args.size() );
for(auto& arg : node.m_args)
@@ -1487,8 +1487,8 @@ namespace {
values.push_back( m_builder.get_result_in_lvalue(arg->span(), arg->m_res_type) );
m_builder.moved_lvalue( arg->span(), values.back() );
}
-
-
+
+
auto panic_block = m_builder.new_bb_unlinked();
auto next_block = m_builder.new_bb_unlinked();
auto res = m_builder.new_temporary( node.m_res_type );
@@ -1497,11 +1497,11 @@ namespace {
res.clone(), mv$(fcn_val),
mv$(values)
}));
-
+
m_builder.set_cur_block(panic_block);
// TODO: Proper panic handling
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
-
+
m_builder.set_cur_block( next_block );
m_builder.set_result( node.span(), mv$(res) );
}
@@ -1515,9 +1515,9 @@ namespace {
TRACE_FUNCTION_F("_Field");
this->visit_node_ptr(node.m_value);
auto val = m_builder.get_result_in_lvalue(node.m_value->span(), node.m_value->m_res_type);
-
+
const auto& val_ty = node.m_value->m_res_type;
-
+
unsigned int idx;
if( '0' <= node.m_field[0] && node.m_field[0] <= '9' ) {
::std::stringstream(node.m_field) >> idx;
@@ -1533,7 +1533,7 @@ namespace {
const auto& unm = *node.m_value->m_res_type.m_data.as_Path().binding.as_Union();
const auto& fields = unm.m_variants;
idx = ::std::find_if( fields.begin(), fields.end(), [&](const auto& x){ return x.first == node.m_field; } ) - fields.begin();
-
+
m_builder.set_result( node.span(), ::MIR::LValue::make_Downcast({ box$(val), idx }) );
}
}
@@ -1595,7 +1595,7 @@ namespace {
const auto& enm = m_builder.crate().get_enum_by_path(sp, enum_path);
auto var_it = ::std::find_if(enm.m_variants.begin(), enm.m_variants.end(), [&](const auto& x){ return x.first == var_name; });
ASSERT_BUG(sp, var_it != enm.m_variants.end(), "Variant " << node.m_path.m_path << " isn't present");
-
+
variant_index = var_it - enm.m_variants.begin();
}
m_builder.set_result( node.span(), ::MIR::RValue::make_Struct({
@@ -1614,13 +1614,13 @@ namespace {
auto enum_path = pe.m_path;
enum_path.m_components.pop_back();
const auto& var_name = pe.m_path.m_components.back();
-
+
const auto& enm = m_builder.crate().get_enum_by_path(sp, enum_path);
auto var_it = ::std::find_if(enm.m_variants.begin(), enm.m_variants.end(), [&](const auto& x){ return x.first == var_name; });
ASSERT_BUG(sp, var_it != enm.m_variants.end(), "Variant " << pe.m_path << " isn't present");
const auto& var = var_it->second;
ASSERT_BUG(sp, var.is_Tuple(), "Variant " << pe.m_path << " isn't a tuple variant");
-
+
// TODO: Ideally, the creation of the wrapper function would happen somewhere before this?
auto tmp = m_builder.new_temporary( node.m_res_type );
m_builder.push_stmt_assign( sp, tmp.clone(), ::MIR::Constant::make_ItemAddr(node.m_path.clone()) );
@@ -1670,7 +1670,7 @@ namespace {
BUG(sp, "Unknown param in free function - " << gt);
}
};
-
+
// TODO: Obtain function type for this function (i.e. a type that is specifically for this function)
auto fcn_ty_data = ::HIR::FunctionType {
e.m_unsafe,
@@ -1752,7 +1752,7 @@ namespace {
TRACE_FUNCTION_F("_Variable - " << node.m_name << " #" << node.m_slot);
m_builder.set_result( node.span(), ::MIR::LValue::make_Variable(node.m_slot) );
}
-
+
void visit(::HIR::ExprNode_StructLiteral& node) override
{
TRACE_FUNCTION_F("_StructLiteral");
@@ -1762,7 +1762,7 @@ namespace {
this->visit_node_ptr(node.m_base_value);
base_val = m_builder.get_result_in_lvalue(node.m_base_value->span(), node.m_base_value->m_res_type);
}
-
+
unsigned int variant_index = ~0u;
const ::HIR::t_struct_fields* fields_ptr = nullptr;
TU_MATCH(::HIR::TypeRef::TypePathBinding, (node.m_res_type.m_data.as_Path().binding), (e),
@@ -1785,12 +1785,12 @@ namespace {
)
assert(fields_ptr);
const ::HIR::t_struct_fields& fields = *fields_ptr;
-
+
::std::vector<bool> values_set;
::std::vector< ::MIR::LValue> values;
values.resize( fields.size() );
values_set.resize( fields.size() );
-
+
for(auto& ent : node.m_values)
{
auto& valnode = ent.second;
@@ -1812,7 +1812,7 @@ namespace {
// Partial move support will handle dropping the rest?
}
}
-
+
m_builder.set_result( node.span(), ::MIR::RValue::make_Struct({
node.m_path.clone(),
variant_index,
@@ -1822,10 +1822,10 @@ namespace {
void visit(::HIR::ExprNode_UnionLiteral& node) override
{
TRACE_FUNCTION_F("_UnionLiteral " << node.m_path);
-
+
this->visit_node_ptr(node.m_value);
auto val = m_builder.get_result_in_lvalue(node.m_value->span(), node.m_value->m_res_type);
-
+
const auto& unm = *node.m_res_type.m_data.as_Path().binding.as_Union();
auto it = ::std::find_if(unm.m_variants.begin(), unm.m_variants.end(), [&](const auto&v)->auto{ return v.first == node.m_variant_name; });
assert(it != unm.m_variants.end());
@@ -1837,7 +1837,7 @@ namespace {
mv$(val)
}) );
}
-
+
void visit(::HIR::ExprNode_Tuple& node) override
{
TRACE_FUNCTION_F("_Tuple");
@@ -1848,12 +1848,12 @@ namespace {
this->visit_node_ptr(arg);
values.push_back( m_builder.lvalue_or_temp( arg->span(), arg->m_res_type, m_builder.get_result(arg->span()) ) );
}
-
+
m_builder.set_result( node.span(), ::MIR::RValue::make_Tuple({
mv$(values)
}) );
}
-
+
void visit(::HIR::ExprNode_ArrayList& node) override
{
TRACE_FUNCTION_F("_ArrayList");
@@ -1864,28 +1864,28 @@ namespace {
this->visit_node_ptr(arg);
values.push_back( m_builder.lvalue_or_temp( arg->span(), arg->m_res_type, m_builder.get_result(arg->span()) ) );
}
-
+
m_builder.set_result( node.span(), ::MIR::RValue::make_Array({
mv$(values)
}) );
}
-
+
void visit(::HIR::ExprNode_ArraySized& node) override
{
TRACE_FUNCTION_F("_ArraySized");
this->visit_node_ptr( node.m_val );
auto value = m_builder.lvalue_or_temp( node.span(), node.m_val->m_res_type, m_builder.get_result(node.m_val->span()) );
-
+
m_builder.set_result( node.span(), ::MIR::RValue::make_SizedArray({
mv$(value),
static_cast<unsigned int>(node.m_size_val)
}) );
}
-
+
void visit(::HIR::ExprNode_Closure& node) override
{
TRACE_FUNCTION_F("_Closure - " << node.m_obj_path);
-
+
::std::vector< ::MIR::LValue> vals;
vals.reserve( node.m_captures.size() );
for(auto& arg : node.m_captures)
@@ -1893,7 +1893,7 @@ namespace {
this->visit_node_ptr(arg);
vals.push_back( m_builder.get_result_in_lvalue(arg->span(), arg->m_res_type) );
}
-
+
m_builder.set_result( node.span(), ::MIR::RValue::make_Struct({
node.m_obj_path.clone(),
~0u,
@@ -1907,17 +1907,17 @@ namespace {
::MIR::FunctionPointer LowerMIR(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path, const ::HIR::ExprPtr& ptr, const ::HIR::Function::args_t& args)
{
TRACE_FUNCTION;
-
+
::MIR::Function fcn;
fcn.named_variables.reserve(ptr.m_bindings.size());
for(const auto& t : ptr.m_bindings)
fcn.named_variables.push_back( t.clone() );
-
+
// Scope ensures that builder cleanup happens before `fcn` is moved
{
MirBuilder builder { ptr->span(), resolve, args, fcn };
ExprVisitor_Conv ev { builder, ptr.m_bindings };
-
+
// 1. Apply destructuring to arguments
unsigned int i = 0;
for( const auto& arg : args )
@@ -1926,12 +1926,12 @@ namespace {
ev.destructure_from(ptr->span(), arg.first, ::MIR::LValue::make_Argument({i}));
i ++;
}
-
+
// 2. Destructure code
::HIR::ExprNode& root_node = const_cast<::HIR::ExprNode&>(*ptr);
root_node.visit( ev );
}
-
+
return ::MIR::FunctionPointer(new ::MIR::Function(mv$(fcn)));
}
diff --git a/src/mir/from_hir.hpp b/src/mir/from_hir.hpp
index caeba6ad..7c83b44f 100644
--- a/src/mir/from_hir.hpp
+++ b/src/mir/from_hir.hpp
@@ -16,10 +16,10 @@ class MirBuilder;
class ScopeHandle
{
friend class MirBuilder;
-
+
const MirBuilder& m_builder;
unsigned int idx;
-
+
ScopeHandle(const MirBuilder& builder, unsigned int idx):
m_builder(builder),
idx(idx)
@@ -43,11 +43,11 @@ enum class VarState {
Uninit, // No value assigned yet
Moved, // Definitely moved
Dropped, // Dropped (out of scope)
-
+
InnerMoved, // The inner has been moved, but the container needs to be dropped
//MaybeMovedInner, // Inner possibly has been moved
MaybeMoved, // Possibly has been moved
-
+
Init, // Initialised and valid at this point
};
extern ::std::ostream& operator<<(::std::ostream& os, VarState x);
@@ -57,7 +57,7 @@ struct SplitArm {
bool always_early_terminated = false; // Populated on completion
::std::vector<bool> changed_var_states; // Indexed by binding number
::std::vector<VarState> var_states;
-
+
::std::vector<bool> changed_tmp_states;
::std::vector<VarState> tmp_states;
};
@@ -80,31 +80,31 @@ TAGGED_UNION(ScopeType, Variables,
class MirBuilder
{
friend class ScopeHandle;
-
+
const Span& m_root_span;
const StaticTraitResolve& m_resolve;
const ::HIR::Function::args_t& m_args;
::MIR::Function& m_output;
-
+
const ::HIR::SimplePath* m_lang_Box;
-
+
unsigned int m_current_block;
bool m_block_active;
-
+
::MIR::RValue m_result;
bool m_result_valid;
-
+
// TODO: Extra information.
//::std::vector<VarState> m_arg_states;
::std::vector<VarState> m_variable_states;
::std::vector<VarState> m_temporary_states;
-
+
struct ScopeDef
{
const Span& span;
bool complete = false;
ScopeType data;
-
+
ScopeDef(const Span& span):
span(span)
{
@@ -115,27 +115,27 @@ class MirBuilder
{
}
};
-
+
::std::vector<ScopeDef> m_scopes;
::std::vector<unsigned int> m_scope_stack;
ScopeHandle m_fcn_scope;
public:
MirBuilder(const Span& sp, const StaticTraitResolve& resolve, const ::HIR::Function::args_t& args, ::MIR::Function& output);
~MirBuilder();
-
+
const ::HIR::SimplePath* lang_Box() const { return m_lang_Box; }
const ::HIR::Crate& crate() const { return m_resolve.m_crate; }
const StaticTraitResolve& resolve() const { return m_resolve; }
-
+
//::HIR::TypeRef* is_type_owned_box(::HIR::TypeRef& ty) const {
//}
/// Checks if the passed type is Box<T> and returns a pointer to the T type if so, otherwise nullptr
const ::HIR::TypeRef* is_type_owned_box(const ::HIR::TypeRef& ty) const;
-
+
// - Values
::MIR::LValue new_temporary(const ::HIR::TypeRef& ty);
::MIR::LValue lvalue_or_temp(const Span& sp, const ::HIR::TypeRef& ty, ::MIR::RValue val);
-
+
bool has_result() const {
return m_result_valid;
}
@@ -145,7 +145,7 @@ public:
::MIR::LValue get_result_unwrap_lvalue(const Span& sp);
/// Obtains the result, copying into a temporary if required
::MIR::LValue get_result_in_lvalue(const Span& sp, const ::HIR::TypeRef& ty);
-
+
// - Statements
// Push an assignment. NOTE: This also marks the rvalue as moved
void push_stmt_assign(const Span& sp, ::MIR::LValue dst, ::MIR::RValue val);
@@ -153,26 +153,26 @@ public:
void push_stmt_drop(const Span& sp, ::MIR::LValue val);
// Push a shallow drop (for Box)
void push_stmt_drop_shallow(const Span& sp, ::MIR::LValue val);
-
+
// - Block management
bool block_active() const {
return m_block_active;
}
-
+
// Mark a value as initialised (used for Call, because it has to be done after the panic block is populated)
void mark_value_assigned(const Span& sp, const ::MIR::LValue& val);
-
+
// Moves control of temporaries up to the next scope
void raise_variables(const Span& sp, const ::MIR::LValue& val);
void raise_variables(const Span& sp, const ::MIR::RValue& rval);
-
+
void set_cur_block(unsigned int new_block);
::MIR::BasicBlockId pause_cur_block();
void end_block(::MIR::Terminator term);
-
+
::MIR::BasicBlockId new_bb_linked();
::MIR::BasicBlockId new_bb_unlinked();
-
+
// --- Scopes ---
ScopeHandle new_scope_var(const Span& sp);
ScopeHandle new_scope_temp(const Span& sp);
@@ -182,7 +182,7 @@ public:
void terminate_scope_early(const Span& sp, const ScopeHandle& );
void end_split_arm(const Span& sp, const ScopeHandle& , bool reachable);
void end_split_arm_early(const Span& sp);
-
+
const ScopeHandle& fcn_scope() const {
return m_fcn_scope;
}
@@ -196,10 +196,10 @@ private:
void set_variable_state(const Span& sp, unsigned int idx, VarState state);
VarState get_temp_state(const Span& sp, unsigned int idx) const;
void set_temp_state(const Span& sp, unsigned int idx, VarState state);
-
+
void drop_scope_values(const ScopeDef& sd);
void complete_scope(ScopeDef& sd);
-
+
public:
void with_val_type(const Span& sp, const ::MIR::LValue& val, ::std::function<void(const ::HIR::TypeRef&)> cb) const;
bool lvalue_is_copy(const Span& sp, const ::MIR::LValue& lv) const;
diff --git a/src/mir/from_hir_match.cpp b/src/mir/from_hir_match.cpp
index f191f3fa..d3c5a5eb 100644
--- a/src/mir/from_hir_match.cpp
+++ b/src/mir/from_hir_match.cpp
@@ -17,14 +17,14 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
struct field_path_t
{
::std::vector<uint8_t> data;
-
+
size_t size() const { return data.size(); }
void push_back(uint8_t v) { data.push_back(v); }
void pop_back() { data.pop_back(); }
uint8_t& back() { return data.back(); }
-
+
bool operator==(const field_path_t& x) const { return data == x.data; }
-
+
friend ::std::ostream& operator<<(::std::ostream& os, const field_path_t& x) {
for(const auto idx : x.data)
os << "." << static_cast<unsigned int>(idx);
@@ -61,9 +61,9 @@ struct PatternRuleset
unsigned int pat_idx;
::std::vector<PatternRule> m_rules;
-
+
static ::Ordering rule_is_before(const PatternRule& l, const PatternRule& r);
-
+
bool is_before(const PatternRuleset& other) const;
};
/// Generated code for an arm
@@ -88,7 +88,7 @@ struct PatternRulesetBuilder
bool m_is_impossible;
::std::vector<PatternRule> m_rules;
field_path_t m_field_path;
-
+
PatternRulesetBuilder(const StaticTraitResolve& resolve):
m_resolve(resolve),
m_is_impossible(false)
@@ -97,7 +97,7 @@ struct PatternRulesetBuilder
m_lang_Box = &resolve.m_crate.m_lang_items.at("owned_box");
}
}
-
+
void append_from_lit(const Span& sp, const ::HIR::Literal& lit, const ::HIR::TypeRef& ty);
void append_from(const Span& sp, const ::HIR::Pattern& pat, const ::HIR::TypeRef& ty);
void push_rule(PatternRule r);
@@ -113,17 +113,17 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
{
// TODO: If any arm moves a non-Copy value, then mark `match_val` as moved
TRACE_FUNCTION;
-
+
bool fall_back_on_simple = false;
-
+
auto result_val = builder.new_temporary( node.m_res_type );
auto next_block = builder.new_bb_unlinked();
-
+
// 1. Stop the current block so we can generate code
auto first_cmp_block = builder.new_bb_unlinked();
builder.end_block( ::MIR::Terminator::make_Goto(first_cmp_block) );
-
+
struct H {
static bool is_pattern_move(const Span& sp, const MirBuilder& builder, const ::HIR::Pattern& pat) {
if( pat.m_binding.is_valid() )
@@ -221,7 +221,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
return false;
}
};
-
+
bool has_move_pattern = false;
for(const auto& arm : node.m_arms)
{
@@ -234,7 +234,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
if( has_move_pattern )
break ;
}
-
+
auto match_scope = builder.new_scope_split(node.span());
// Map of arm index to ruleset
@@ -245,12 +245,12 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
DEBUG("ARM " << arm_idx);
/*const*/ auto& arm = node.m_arms[arm_idx];
ArmCode ac;
-
+
// Register introduced bindings to be dropped on return/diverge within this scope
auto drop_scope = builder.new_scope_var( arm.m_code->span() );
// - Define variables from the first pattern
conv.define_vars_from(node.span(), arm.m_patterns.front());
-
+
for( unsigned int pat_idx = 0; pat_idx < arm.m_patterns.size(); pat_idx ++ )
{
const auto& pat = arm.m_patterns[pat_idx];
@@ -266,7 +266,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
DEBUG("ARM PAT (" << arm_idx << "," << pat_idx << ") " << pat << " ==> [" << pat_builder.m_rules << "]");
arm_rules.push_back( PatternRuleset { arm_idx, pat_idx, mv$(pat_builder.m_rules) } );
}
-
+
// - Emit code to destructure the matched pattern
ac.destructures.push_back( builder.new_bb_unlinked() );
builder.set_cur_block( ac.destructures.back() );
@@ -274,14 +274,14 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
builder.pause_cur_block();
// NOTE: Paused block resumed upon successful match
}
-
+
// TODO: If this pattern ignores fields with Drop impls, this will lead to leaks.
// - Ideally, this would trigger a drop of whatever wasn't already taken by the pattern.
if( has_move_pattern )
{
builder.moved_lvalue(node.span(), match_val);
}
-
+
// Condition
// NOTE: Lack of drop due to early exit from this arm isn't an issue. All captures must be Copy
// - The above is rustc E0008 "cannot bind by-move into a pattern guard"
@@ -291,13 +291,13 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
ac.has_condition = true;
ac.cond_start = builder.new_bb_unlinked();
builder.set_cur_block( ac.cond_start );
-
+
// TODO: Temp scope.
conv.visit_node_ptr( arm.m_cond );
ac.cond_lval = builder.get_result_in_lvalue(arm.m_cond->span(), ::HIR::TypeRef(::HIR::CoreType::Bool));
ac.cond_end = builder.pause_cur_block();
// NOTE: Paused so that later code (which knows what the false branch will be) can end it correctly
-
+
// TODO: What to do with conditionals in the fast model?
// > Could split the match on each conditional - separating such that if a conditional fails it can fall into the other compatible branches.
fall_back_on_simple = true;
@@ -335,10 +335,10 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
// - Go to the next block
builder.end_block( ::MIR::Terminator::make_Goto(next_block) );
}
-
+
arm_code.push_back( mv$(ac) );
}
-
+
// Sort columns of `arm_rules` to maximise effectiveness
if( arm_rules[0].m_rules.size() > 1 )
{
@@ -354,7 +354,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
}
}
}
-
+
DEBUG("- Column weights = [" << column_weights << "]");
// - Sort columns such that the largest (most specific) comes first
::std::vector<unsigned> columns_sorted(column_weights.size());
@@ -371,12 +371,12 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
arm_rule.m_rules = mv$(sorted);
}
}
-
+
for(const auto& arm_rule : arm_rules)
{
DEBUG("> (" << arm_rule.arm_idx << ", " << arm_rule.pat_idx << ") - " << arm_rule.m_rules);
}
-
+
// TODO: Detect if a rule is ordering-dependent. In this case we currently have to fall back on the simple match code
// - A way would be to search for `_` rules with non-`_` rules following. Would false-positive in some cases, but shouldn't false-negative
// TODO: Merge equal rulesets if there's one with no condition.
@@ -387,7 +387,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
else {
MIR_LowerHIR_Match_DecisionTree( builder, conv, node, mv$(match_val), mv$(arm_rules), mv$(arm_code), first_cmp_block );
}
-
+
builder.set_cur_block( next_block );
builder.set_result( node.span(), mv$(result_val) );
builder.terminate_scope( node.span(), mv$(match_scope) );
@@ -439,7 +439,7 @@ void MIR_LowerHIR_Match( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNod
else
return ::OrdLess;
}
-
+
TU_MATCHA( (l,r), (le,re),
(Any,
return ::OrdEqual;
@@ -581,7 +581,7 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
(Struct,
ASSERT_BUG(sp, lit.is_List(), "Matching struct non-list literal - " << ty << " with " << lit);
const auto& list = lit.as_List();
-
+
auto monomorph = [&](const auto& ty) {
auto rv = monomorphise_type(sp, pbe->m_params, e.path.m_data.as_Generic().m_params, ty);
this->m_resolve.expand_associated_types(sp, rv);
@@ -600,7 +600,7 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
{
::HIR::TypeRef tmp;
const auto& sty_mono = (monomorphise_type_needed(sd[i].ent) ? tmp = monomorph(sd[i].ent) : sd[i].ent);
-
+
this->append_from_lit(sp, list[i], sty_mono);
m_field_path.back() ++;
}
@@ -614,7 +614,7 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
const auto& fld_ty = sd[i].second.ent;
::HIR::TypeRef tmp;
const auto& sty_mono = (monomorphise_type_needed(fld_ty) ? tmp = monomorph(fld_ty) : fld_ty);
-
+
this->append_from_lit(sp, list[i], sty_mono);
m_field_path.back() ++;
}
@@ -633,14 +633,14 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
this->m_resolve.expand_associated_types(sp, rv);
return rv;
};
-
+
ASSERT_BUG(sp, var_idx < pbe->m_variants.size(), "Literal refers to a variant out of range");
const auto& var_def = pbe->m_variants.at(var_idx);
-
+
PatternRulesetBuilder sub_builder { this->m_resolve };
sub_builder.m_field_path = m_field_path;
sub_builder.m_field_path.push_back(0);
-
+
TU_MATCH( ::HIR::Enum::Variant, (var_def.second), (fields_def),
(Unit,
),
@@ -654,10 +654,10 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
sub_builder.m_field_path.back() = i;
const auto& val = list[i];
const auto& ty_tpl = fields_def[i].ent;
-
+
::HIR::TypeRef tmp;
const auto& subty = (monomorphise_type_needed(ty_tpl) ? tmp = monomorph(ty_tpl) : ty_tpl);
-
+
sub_builder.append_from_lit( sp, val, subty );
}
),
@@ -669,15 +669,15 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
sub_builder.m_field_path.back() = i;
const auto& val = list[i];
const auto& ty_tpl = fields_def[i].second.ent;
-
+
::HIR::TypeRef tmp;
const auto& subty = (monomorphise_type_needed(ty_tpl) ? tmp = monomorph(ty_tpl) : ty_tpl);
-
+
sub_builder.append_from_lit( sp, val, subty );
}
)
)
-
+
this->push_rule( PatternRule::make_Variant({ var_idx, mv$(sub_builder.m_rules) }) );
)
)
@@ -697,7 +697,7 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
ASSERT_BUG(sp, lit.is_List(), "Matching array with non-list literal - " << lit);
const auto& list = lit.as_List();
ASSERT_BUG(sp, e.size_val == list.size(), "Matching array with mismatched literal size - " << e.size_val << " != " << list.size());
-
+
// Sequential match just like tuples.
m_field_path.push_back(0);
for(unsigned int i = 0; i < e.size_val; i ++) {
@@ -709,7 +709,7 @@ void PatternRulesetBuilder::append_from_lit(const Span& sp, const ::HIR::Literal
(Slice,
ASSERT_BUG(sp, lit.is_List(), "Matching array with non-list literal - " << lit);
const auto& list = lit.as_List();
-
+
PatternRulesetBuilder sub_builder { this->m_resolve };
sub_builder.m_field_path = m_field_path;
sub_builder.m_field_path.push_back(0);
@@ -772,7 +772,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
throw "";
}
};
-
+
// TODO: Outer handling for Value::Named patterns
// - Convert them into either a pattern, or just a variant of this function that operates on ::HIR::Literal
// > It does need a way of handling unknown-value constants (e.g. <GenericT as Foo>::CONST)
@@ -790,7 +790,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
}
)
)
-
+
TU_MATCHA( (ty.m_data), (e),
(Infer, BUG(sp, "Ivar for in match type"); ),
(Diverge,
@@ -939,7 +939,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
return rv;
};
const auto& str_data = pbe->m_data;
-
+
if( m_lang_Box && e.path.m_data.as_Generic().m_path == *m_lang_Box )
{
const auto& inner_ty = e.path.m_data.as_Generic().m_params.m_types.at(0);
@@ -994,7 +994,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
{
const auto& fld = sd[i];
const auto& fld_pat = pe.sub_patterns[i];
-
+
::HIR::TypeRef tmp;
const auto& sty_mono = (monomorphise_type_needed(fld.ent) ? tmp = monomorph(fld.ent) : fld.ent);
this->append_from(sp, fld_pat, sty_mono);
@@ -1025,7 +1025,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
{
::HIR::TypeRef tmp;
const auto& sty_mono = (monomorphise_type_needed(fld.second.ent) ? tmp = monomorph(fld.second.ent) : fld.second.ent);
-
+
auto it = ::std::find_if( pe.sub_patterns.begin(), pe.sub_patterns.end(), [&](const auto& x){ return x.first == fld.first; } );
if( it == pe.sub_patterns.end() )
{
@@ -1070,7 +1070,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
),
(EnumTuple,
const auto& var_def = pe.binding_ptr->m_variants.at(pe.binding_idx);
-
+
const auto& fields_def = var_def.second.as_Tuple();
PatternRulesetBuilder sub_builder { this->m_resolve };
sub_builder.m_field_path = m_field_path;
@@ -1080,10 +1080,10 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
sub_builder.m_field_path.back() = i;
const auto& subpat = pe.sub_patterns[i];
const auto& ty_tpl = fields_def[i].ent;
-
+
::HIR::TypeRef tmp;
const auto& subty = (monomorphise_type_needed(ty_tpl) ? tmp = monomorph(ty_tpl) : ty_tpl);
-
+
sub_builder.append_from( sp, subpat, subty );
}
if( sub_builder.m_is_impossible )
@@ -1111,7 +1111,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
for( unsigned int i = 0; i < tmp.size(); i ++ )
{
sub_builder.m_field_path.back() = i;
-
+
auto subty = monomorph(fields_def[i].second.ent);
if( tmp[i] == ~0u ) {
sub_builder.append_from( sp, ::HIR::Pattern(), subty );
@@ -1194,7 +1194,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
sub_builder.append_from( sp, subpat, *e.inner );
sub_builder.m_field_path.back() ++;
}
-
+
// Encodes length check and sub-pattern rules
this->push_rule( PatternRule::make_Slice({ static_cast<unsigned int>(pe.sub_patterns.size()), mv$(sub_builder.m_rules) }) );
),
@@ -1208,14 +1208,14 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
sub_builder.m_field_path.back() ++;
}
auto leading = mv$(sub_builder.m_rules);
-
+
sub_builder.m_field_path.back() = 0;
if( pe.trailing.size() )
{
TODO(sp, "SplitSlice on [T] with trailing - " << pat);
}
auto trailing = mv$(sub_builder.m_rules);
-
+
this->push_rule( PatternRule::make_SplitSlice({
static_cast<unsigned int>(pe.leading.size() + pe.trailing.size()),
mv$(leading), mv$(trailing)
@@ -1245,7 +1245,7 @@ void PatternRulesetBuilder::append_from(const Span& sp, const ::HIR::Pattern& pa
data.reserve(s.size());
for(auto c : s)
data.push_back(c);
-
+
this->push_rule( PatternRule::make_Value( mv$(data) ) );
}
// TODO: Handle named values
@@ -1291,13 +1291,13 @@ namespace {
::MIR::LValue lval = top_val.clone();
::HIR::TypeRef tmp_ty;
const ::HIR::TypeRef* cur_ty = &top_ty;
-
+
// TODO: Cache the correspondence of path->type (lval can be inferred)
ASSERT_BUG(sp, field_path_ofs <= field_path.size(), "Field path offset " << field_path_ofs << " is larger than the path [" << field_path << "]");
for(unsigned int i = field_path_ofs; i < field_path.size(); i ++ )
{
auto idx = field_path.data[i];
-
+
TU_MATCHA( (cur_ty->m_data), (e),
(Infer, BUG(sp, "Ivar for in match type"); ),
(Diverge, BUG(sp, "Diverge in match type"); ),
@@ -1421,7 +1421,7 @@ namespace {
)
)
}
-
+
out_ty = (cur_ty == &tmp_ty ? mv$(tmp_ty) : cur_ty->clone());
out_val = mv$(lval);
}
@@ -1435,7 +1435,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
void MIR_LowerHIR_Match_Simple( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNode_Match& node, ::MIR::LValue match_val, t_arm_rules arm_rules, ::std::vector<ArmCode> arms_code, ::MIR::BasicBlockId first_cmp_block )
{
TRACE_FUNCTION;
-
+
// 1. Generate pattern matches
unsigned int rule_idx = 0;
builder.set_cur_block( first_cmp_block );
@@ -1443,18 +1443,18 @@ void MIR_LowerHIR_Match_Simple( MirBuilder& builder, MirConverter& conv, ::HIR::
{
const auto& arm = node.m_arms[arm_idx];
auto& arm_code = arms_code[arm_idx];
-
+
auto next_arm_bb = builder.new_bb_unlinked();
-
+
for( unsigned int i = 0; i < arm.m_patterns.size(); i ++ )
{
if( arm_code.destructures[i] == 0 )
continue ;
-
+
const auto& pat_rule = arm_rules[rule_idx];
bool is_last_pat = (i+1 == arm.m_patterns.size());
auto next_pattern_bb = (!is_last_pat ? builder.new_bb_unlinked() : next_arm_bb);
-
+
// 1. Check
// - If the ruleset is empty, this is a _ arm over a value
if( pat_rule.m_rules.size() > 0 )
@@ -1463,7 +1463,7 @@ void MIR_LowerHIR_Match_Simple( MirBuilder& builder, MirConverter& conv, ::HIR::
}
builder.end_block( ::MIR::Terminator::make_Goto(arm_code.destructures[i]) );
builder.set_cur_block( arm_code.destructures[i] );
-
+
// - Go to code/condition check
if( arm_code.has_condition )
{
@@ -1473,12 +1473,12 @@ void MIR_LowerHIR_Match_Simple( MirBuilder& builder, MirConverter& conv, ::HIR::
{
builder.end_block( ::MIR::Terminator::make_Goto(arm_code.code) );
}
-
+
if( !is_last_pat )
{
builder.set_cur_block( next_pattern_bb );
}
-
+
rule_idx ++;
}
if( arm_code.has_condition )
@@ -1499,17 +1499,17 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
{
const auto& rule = rules[rule_idx];
DEBUG("rule = " << rule);
-
+
// Don't emit anything for '_' matches
if( rule.is_Any() )
continue ;
-
+
::MIR::LValue val;
::HIR::TypeRef ity;
-
+
get_ty_and_val(sp, builder.resolve(), top_ty, top_val, rule.field_path, field_path_ofs, ity, val);
DEBUG("ty = " << ity << ", val = " << val);
-
+
const auto& ty = ity;
TU_MATCHA( (ty.m_data), (te),
(Infer,
@@ -1524,9 +1524,9 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
case ::HIR::CoreType::Bool: {
ASSERT_BUG(sp, rule.is_Bool(), "PatternRule for bool isn't _Bool");
bool test_val = rule.as_Bool();
-
+
auto succ_bb = builder.new_bb_unlinked();
-
+
if( test_val ) {
builder.end_block( ::MIR::Terminator::make_If({ val.clone(), succ_bb, fail_bb }) );
}
@@ -1546,7 +1546,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
),
(Value,
auto succ_bb = builder.new_bb_unlinked();
-
+
auto test_lval = builder.lvalue_or_temp(sp, te, ::MIR::Constant(re.as_Uint()));
auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, mv$(test_lval) }));
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), succ_bb, fail_bb }) );
@@ -1568,7 +1568,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
),
(Value,
auto succ_bb = builder.new_bb_unlinked();
-
+
auto test_lval = builder.lvalue_or_temp(sp, te, ::MIR::Constant(re.as_Int()));
auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, mv$(test_lval) }));
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), succ_bb, fail_bb }) );
@@ -1586,7 +1586,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
),
(Value,
auto succ_bb = builder.new_bb_unlinked();
-
+
auto test_lval = builder.lvalue_or_temp(sp, te, ::MIR::Constant(re.as_Uint()));
auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, mv$(test_lval) }));
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), succ_bb, fail_bb }) );
@@ -1595,19 +1595,19 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
(ValueRange,
auto succ_bb = builder.new_bb_unlinked();
auto test_bb_2 = builder.new_bb_unlinked();
-
+
// IF `val` < `first` : fail_bb
auto test_lt_lval = builder.lvalue_or_temp(sp, te, ::MIR::Constant(re.first.as_Uint()));
auto cmp_lt_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::LT, mv$(test_lt_lval) }));
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lt_lval), fail_bb, test_bb_2 }) );
-
+
builder.set_cur_block(test_bb_2);
-
+
// IF `val` > `last` : fail_bb
auto test_gt_lval = builder.lvalue_or_temp(sp, te, ::MIR::Constant(re.last.as_Uint()));
auto cmp_gt_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::GT, mv$(test_gt_lval) }));
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_gt_lval), fail_bb, succ_bb }) );
-
+
builder.set_cur_block(succ_bb);
)
)
@@ -1619,9 +1619,9 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
case ::HIR::CoreType::Str: {
ASSERT_BUG(sp, rule.is_Value() && rule.as_Value().is_StaticString(), "");
const auto& v = rule.as_Value();
-
+
auto succ_bb = builder.new_bb_unlinked();
-
+
auto test_lval = builder.lvalue_or_temp(sp, ::HIR::TypeRef::new_borrow(::HIR::BorrowType::Shared, ty.clone()), ::MIR::RValue(::MIR::Constant( v.as_StaticString() )));
auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ val.clone(), ::MIR::eBinOp::EQ, ::MIR::LValue::make_Deref({box$(test_lval)}) }));
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), succ_bb, fail_bb }) );
@@ -1659,17 +1659,17 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
ASSERT_BUG(sp, rule.is_Variant(), "Rule for enum isn't Any or Variant");
const auto& re = rule.as_Variant();
unsigned int var_idx = re.idx;
-
+
auto next_bb = builder.new_bb_unlinked();
auto var_count = pbe->m_variants.size();
-
+
// Generate a switch with only one option different.
::std::vector< ::MIR::BasicBlockId> arms(var_count, fail_bb);
arms[var_idx] = next_bb;
builder.end_block( ::MIR::Terminator::make_Switch({ val.clone(), mv$(arms) }) );
-
+
builder.set_cur_block(next_bb);
-
+
if( re.sub_rules.size() > 0 )
{
const auto& var_data = pbe->m_variants.at(re.idx).second;
@@ -1689,7 +1689,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
fake_ty_ents.push_back( monomorph(ve[i].ent) );
}
::HIR::TypeRef fake_tup = ::HIR::TypeRef( mv$(fake_ty_ents) );
-
+
// Recurse with the new ruleset
MIR_LowerHIR_Match_Simple__GeneratePattern(builder, sp,
re.sub_rules.data(), re.sub_rules.size(),
@@ -1706,7 +1706,7 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
fake_ty_ents.push_back( monomorph(ve[i].second.ent) );
}
::HIR::TypeRef fake_tup = ::HIR::TypeRef( mv$(fake_ty_ents) );
-
+
// Recurse with the new ruleset
MIR_LowerHIR_Match_Simple__GeneratePattern(builder, sp,
re.sub_rules.data(), re.sub_rules.size(),
@@ -1752,11 +1752,11 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
if( rule.is_Value() ) {
ASSERT_BUG(sp, *te.inner == ::HIR::CoreType::U8, "Bytes pattern on non-&[u8]");
auto cloned_val = ::MIR::Constant( rule.as_Value().as_Bytes() );
-
+
auto succ_bb = builder.new_bb_unlinked();
-
+
auto inner_val = val.as_Deref().val->clone();
-
+
auto test_lval = builder.lvalue_or_temp(sp, ::HIR::TypeRef::new_borrow(::HIR::BorrowType::Shared, ty.clone()), ::MIR::RValue(mv$(cloned_val)));
auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ mv$(inner_val), ::MIR::eBinOp::EQ, mv$(test_lval) }));
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), succ_bb, fail_bb }) );
@@ -1764,16 +1764,16 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
}
else if( rule.is_Slice() ) {
const auto& re = rule.as_Slice();
-
+
// Compare length
auto test_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue( ::MIR::Constant::make_Uint(re.len) ));
auto len_val = builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_DstMeta({ val.clone() }));
auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ mv$(len_val), ::MIR::eBinOp::EQ, mv$(test_lval) }));
-
+
auto len_succ_bb = builder.new_bb_unlinked();
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), len_succ_bb, fail_bb }) );
builder.set_cur_block(len_succ_bb);
-
+
// Recurse checking values
MIR_LowerHIR_Match_Simple__GeneratePattern(builder, sp,
re.sub_rules.data(), re.sub_rules.size(),
@@ -1783,22 +1783,22 @@ int MIR_LowerHIR_Match_Simple__GeneratePattern(MirBuilder& builder, const Span&
}
else if( rule.is_SplitSlice() ) {
const auto& re = rule.as_SplitSlice();
-
+
// Compare length
auto test_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue( ::MIR::Constant::make_Uint(re.min_len) ));
auto len_val = builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_DstMeta({ val.clone() }));
auto cmp_lval = builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ mv$(len_val), ::MIR::eBinOp::LT, mv$(test_lval) }));
-
+
auto len_succ_bb = builder.new_bb_unlinked();
builder.end_block( ::MIR::Terminator::make_If({ mv$(cmp_lval), fail_bb, len_succ_bb }) ); // if len < test : FAIL
builder.set_cur_block(len_succ_bb);
-
+
MIR_LowerHIR_Match_Simple__GeneratePattern(builder, sp,
re.leading.data(), re.leading.size(),
top_ty, top_val, field_path_ofs,
fail_bb
);
-
+
if( re.trailing.size() > 0 )
{
TODO(sp, "Match over Slice using SplitSlice with trailing - " << rule);
@@ -1841,13 +1841,13 @@ struct DecisionTreeNode
(Subtree, ::std::unique_ptr<DecisionTreeNode>),
(Terminal, unsigned int)
);
-
+
template<typename T>
struct Range
{
T start;
T end;
-
+
// `x` starts after this range ends
bool operator<(const Range<T>& x) const {
return (end < x.start);
@@ -1856,7 +1856,7 @@ struct DecisionTreeNode
bool operator<(const T& x) const {
return (end < x);
}
-
+
// `x` ends before this starts, or overlaps
bool operator>=(const Range<T>& x) const {
return start > x.end || overlaps(x);
@@ -1865,28 +1865,28 @@ struct DecisionTreeNode
bool operator>=(const T& x) const {
return start > x || contains(x);
}
-
+
bool operator>(const Range<T>& x) const {
return (start > x.end);
}
bool operator>(const T& x) const {
return (start > x);
}
-
+
bool operator==(const Range<T>& x) const {
return start == x.start && end == x.end;
}
bool operator!=(const Range<T>& x) const {
return start != x.start || end != x.end;
}
-
+
bool contains(const T& x) const {
return (start <= x && x <= end);
}
bool overlaps(const Range<T>& x) const {
return (x.start <= start && start <= x.end) || (x.start <= end && end <= x.end);
}
-
+
friend ::std::ostream& operator<<(::std::ostream& os, const Range<T>& x) {
if( x.start == x.end ) {
return os << x.start;
@@ -1896,7 +1896,7 @@ struct DecisionTreeNode
}
}
};
-
+
TAGGED_UNION( Values, Unset,
(Unset, struct {}),
(Bool, struct { Branch false_branch, true_branch; }),
@@ -1910,23 +1910,23 @@ struct DecisionTreeNode
//::std::vector< ::std::pair< unsigned int, Branch> > variable_arms;
})
);
-
+
// TODO: Arm specialisation?
field_path_t m_field_path;
Values m_branches;
Branch m_default;
-
+
DecisionTreeNode( field_path_t field_path ):
// TODO: This is commented out for a reason, but I don't know why.
//m_field_path( mv$(field_path) ),
m_branches(),
m_default()
{}
-
+
static Branch clone(const Branch& b);
static Values clone(const Values& x);
DecisionTreeNode clone() const;
-
+
void populate_tree_from_rule(const Span& sp, unsigned int arm_index, const PatternRule* first_rule, unsigned int rule_count) {
populate_tree_from_rule(sp, first_rule, rule_count, [sp,arm_index](auto& branch){
TU_MATCHA( (branch), (e),
@@ -1951,14 +1951,14 @@ struct DecisionTreeNode
}
// `and_then` - Closure called after processing the final rule
void populate_tree_from_rule(const Span& sp, const PatternRule* first_rule, unsigned int rule_count, ::std::function<void(Branch&)> and_then);
-
+
/// Simplifies the tree by eliminating nodes that don't make a decision
void simplify();
/// Propagate the m_default arm's contents to value arms, and vice-versa
void propagate_default();
/// HELPER: Unifies the rules from the provided branch with this node
void unify_from(const Branch& b);
-
+
::MIR::LValue get_field(const ::MIR::LValue& base, unsigned int base_depth) const {
::MIR::LValue cur = base.clone();
for(unsigned int i = base_depth; i < m_field_path.size(); i ++ ) {
@@ -1972,7 +1972,7 @@ struct DecisionTreeNode
}
return cur;
}
-
+
friend ::std::ostream& operator<<(::std::ostream& os, const Branch& x);
friend ::std::ostream& operator<<(::std::ostream& os, const DecisionTreeNode& x);
};
@@ -1981,16 +1981,16 @@ struct DecisionTreeGen
{
MirBuilder& m_builder;
const ::std::vector< ::MIR::BasicBlockId>& m_rule_blocks;
-
+
DecisionTreeGen(MirBuilder& builder, const ::std::vector< ::MIR::BasicBlockId >& rule_blocks):
m_builder( builder ),
m_rule_blocks( rule_blocks )
{}
-
+
::MIR::BasicBlockId get_block_for_rule(unsigned int rule_index) {
return m_rule_blocks.at( rule_index );
}
-
+
void generate_tree_code(const Span& sp, const DecisionTreeNode& node, const ::HIR::TypeRef& ty, const ::MIR::LValue& val) {
generate_tree_code(sp, node, ty, 0, val, [&](const auto& n){
DEBUG("node = " << n);
@@ -2004,9 +2004,9 @@ struct DecisionTreeGen
const ::HIR::TypeRef& ty, unsigned int path_ofs, const ::MIR::LValue& base_val,
::std::function<void(const DecisionTreeNode&)> and_then
);
-
+
void generate_branch(const DecisionTreeNode::Branch& branch, ::std::function<void(const DecisionTreeNode&)> cb);
-
+
void generate_branches_Signed(
const Span& sp,
const DecisionTreeNode::Branch& default_branch,
@@ -2049,7 +2049,7 @@ struct DecisionTreeGen
const ::HIR::TypeRef& ty, ::MIR::LValue val,
::std::function<void(const DecisionTreeNode&)> and_then
);
-
+
void generate_branches_Enum(
const Span& sp,
const DecisionTreeNode::Branch& default_branch,
@@ -2077,7 +2077,7 @@ struct DecisionTreeGen
void MIR_LowerHIR_Match_DecisionTree( MirBuilder& builder, MirConverter& conv, ::HIR::ExprNode_Match& node, ::MIR::LValue match_val, t_arm_rules arm_rules, ::std::vector<ArmCode> arms_code, ::MIR::BasicBlockId first_cmp_block )
{
TRACE_FUNCTION;
-
+
// XXX XXX XXX: The current codegen (below) will generate incorrect code if ordering matters.
// ```
// match ("foo", "bar")
@@ -2091,14 +2091,14 @@ void MIR_LowerHIR_Match_DecisionTree( MirBuilder& builder, MirConverter& conv, :
// TODO: Sort the columns in `arm_rules` to ensure that the most specific rule is parsed first.
// - Ordering within a pattern doesn't matter, only the order of arms matters.
// - This sort could be designed such that the above case would match correctly?
-
+
DEBUG("- Generating rule bindings");
::std::vector< ::MIR::BasicBlockId> rule_blocks;
for(const auto& rule : arm_rules)
{
const auto& arm_code = arms_code[rule.arm_idx];
ASSERT_BUG(node.span(), !arm_code.has_condition, "Decision tree doesn't (yet) support conditionals");
-
+
assert( rule.pat_idx < arm_code.destructures.size() );
// Set the target for when a rule succeeds to the destructuring code for this rule
rule_blocks.push_back( arm_code.destructures[rule.pat_idx] );
@@ -2106,8 +2106,8 @@ void MIR_LowerHIR_Match_DecisionTree( MirBuilder& builder, MirConverter& conv, :
builder.set_cur_block( rule_blocks.back() );
builder.end_block( ::MIR::Terminator::make_Goto(arm_code.code) );
}
-
-
+
+
// - Build tree by running each arm's pattern across it
DEBUG("- Building decision tree");
DecisionTreeNode root_node({});
@@ -2125,7 +2125,7 @@ void MIR_LowerHIR_Match_DecisionTree( MirBuilder& builder, MirConverter& conv, :
root_node.propagate_default();
DEBUG("root_node = " << root_node);
// TODO: Pretty print `root_node`
-
+
// - Convert the above decision tree into MIR
DEBUG("- Emitting decision tree");
DecisionTreeGen gen { builder, rule_blocks };
@@ -2144,7 +2144,7 @@ DecisionTreeNode MIR_LowerHIR_Match_DecisionTree__MakeTree(const Span& sp, t_arm
rules.push_back( arm_rules[i].m_rules );
indexes.push_back(i);
}
-
+
return MIR_LowerHIR_Match_DecisionTree__MakeTree_Node(sp, indexes, rules);
}
DecisionTreeNode MIR_LowerHIR_Match_DecisionTree__MakeTree_Node(const Span& sp, slice<unsigned int> arm_indexes, slice< slice<PatternRule>> arm_rules)
@@ -2152,13 +2152,13 @@ DecisionTreeNode MIR_LowerHIR_Match_DecisionTree__MakeTree_Node(const Span& sp,
assert( arm_indexes.size() == arm_rules.size() );
assert( arm_rules.size() > 1 );
assert( arm_rules[0].size() > 0 );
-
+
// 1. Sort list (should it already be sorted?)
for(const auto& rules : arm_rules)
{
ASSERT_BUG(sp, rules.size() == arm_rules[0].size(), "");
}
-
+
// 2. Detect all arms being `_` and move on to the next condition
while( ::std::all_of(arm_rules.begin(), arm_rules.end(), [](const auto& r){ return r.m_rules[0].is_Any(); }) )
{
@@ -2167,27 +2167,27 @@ DecisionTreeNode MIR_LowerHIR_Match_DecisionTree__MakeTree_Node(const Span& sp,
// No rules left?
BUG(sp, "Duplicate match arms");
}
-
+
for(auto& rules : arm_rules)
{
rules = rules.subslice_from(1);
}
}
-
+
// We have a condition.
for(const auto& rules : arm_rules)
{
ASSERT_BUG(sp, rules[0].is_Any() || rules[0].tag() == arm_rules[0][0].tag(), "Mismatched rules in match");
}
-
+
bool has_any = arm_rules.back()[0].is_Any();
-
+
// All rules must either be _ or the same type, and can't all be _
switch( arm_rules[0][0].tag() )
{
case PatternRule::TAGDEAD: throw "";
case PatternRule::TAG_Any: throw "";
-
+
case PatternRule::TAG_Variant:
break;
// TODO: Value and ValueRange can appear together.
@@ -2273,7 +2273,7 @@ namespace
{
return DecisionTreeNode::Branch( box$(DecisionTreeNode( mv$(path) )) );
}
-
+
// Common code for numerics (Int, Uint, and Float)
template<typename T>
static void from_rule_valuerange(
@@ -2284,7 +2284,7 @@ namespace
{
ASSERT_BUG(sp, ve_start != ve_end, "Range pattern with one value - " << ve_start);
ASSERT_BUG(sp, ve_start < ve_end, "Range pattern with a start after the end - " << ve_start << "..." << ve_end);
-
+
TRACE_FUNCTION_F("[" << FMT_CB(os, for(const auto& i:be) os << i.first <<" , ";) << "]");
// - Find the first entry that ends after the new one starts.
auto it = ::std::find_if(be.begin(), be.end(), [&](const auto& v){ return v.first.end >= ve_start; });
@@ -2332,7 +2332,7 @@ namespace
DEBUG("- Shared head, continue");
//assert(it->first.start == ve_start);
assert((it->first.end) < ve_end);
-
+
if( it->first.start != it->first.end )
and_then(it->second);
ve_start = it->first.end + 1;
@@ -2340,7 +2340,7 @@ namespace
}
}
}
-
+
template<typename T>
static void from_rule_value(
const Span& sp,
@@ -2366,14 +2366,14 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
{
assert( rule_count > 0 );
const auto& rule = *first_rule;
-
+
if( m_field_path.size() == 0 ) {
m_field_path = rule.field_path;
}
else {
ASSERT_BUG(sp, m_field_path == rule.field_path, "Patterns with mismatched field paths - " << m_field_path << " != " << rule.field_path);
}
-
+
#define GET_BRANCHES(fld, var) (({if( fld.is_Unset() ) {\
fld = Values::make_##var({}); \
} \
@@ -2382,7 +2382,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
}}), \
fld.as_##var())
-
+
+
TU_MATCHA( (rule), (e),
(Any, {
if( rule_count == 1 )
@@ -2408,7 +2408,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
}),
(Variant, {
auto& be = GET_BRANCHES(m_branches, Variant);
-
+
auto it = ::std::find_if( be.begin(), be.end(), [&](const auto& x){ return x.first >= e.idx; });
// If this variant isn't yet processed, add a new subtree for it
if( it == be.end() || it->first != e.idx ) {
@@ -2423,7 +2423,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
assert( it->second.is_Subtree() );
}
auto& subtree = *it->second.as_Subtree();
-
+
if( e.sub_rules.size() > 0 && rule_count > 1 )
{
subtree.populate_tree_from_rule(sp, e.sub_rules.data(), e.sub_rules.size(), [&](auto& branch){
@@ -2455,7 +2455,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
}),
(Slice,
auto& be = GET_BRANCHES(m_branches, Slice);
-
+
auto it = ::std::find_if( be.fixed_arms.begin(), be.fixed_arms.end(), [&](const auto& x){ return x.first >= e.len; } );
if( it == be.fixed_arms.end() || it->first != e.len ) {
it = be.fixed_arms.insert(it, ::std::make_pair(e.len, new_branch_subtree(rule.field_path)));
@@ -2468,7 +2468,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
}
assert( it->second.is_Subtree() );
auto& subtree = *it->second.as_Subtree();
-
+
if( e.sub_rules.size() > 0 && rule_count > 1 )
{
subtree.populate_tree_from_rule(sp, e.sub_rules.data(), e.sub_rules.size(), [&](auto& branch){
@@ -2504,7 +2504,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
),
(Bool,
auto& be = GET_BRANCHES(m_branches, Bool);
-
+
auto& branch = (e ? be.true_branch : be.false_branch);
if( branch.is_Unset() ) {
branch = new_branch_subtree( rule.field_path );
@@ -2529,7 +2529,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
TU_MATCHA( (e), (ve),
(Int,
auto& be = GET_BRANCHES(m_branches, Signed);
-
+
// TODO: De-duplicate this code between Uint and Float
from_rule_value(sp, be, ve, "Signed", rule.field_path,
[&](auto& branch) {
@@ -2546,7 +2546,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
),
(Uint,
auto& be = GET_BRANCHES(m_branches, Unsigned);
-
+
from_rule_value(sp, be, ve, "Unsigned", rule.field_path,
[&](auto& branch) {
if( rule_count > 1 ) {
@@ -2562,7 +2562,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
),
(Float,
auto& be = GET_BRANCHES(m_branches, Float);
-
+
from_rule_value(sp, be, ve, "Float", rule.field_path,
[&](auto& branch) {
if( rule_count > 1 ) {
@@ -2583,7 +2583,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
),
(StaticString,
auto& be = GET_BRANCHES(m_branches, String);
-
+
auto it = ::std::find_if(be.begin(), be.end(), [&](const auto& v){ return v.first >= ve; });
if( it == be.end() || it->first != ve ) {
it = be.insert( it, ::std::make_pair(ve, new_branch_subtree(rule.field_path) ) );
@@ -2609,7 +2609,7 @@ void DecisionTreeNode::populate_tree_from_rule(const Span& sp, const PatternRule
)
),
(ValueRange,
-
+
ASSERT_BUG(sp, e.first.tag() == e.last.tag(), "");
TU_MATCHA( (e.first, e.last), (ve_start, ve_end),
(Int,
@@ -2695,7 +2695,7 @@ void DecisionTreeNode::simplify()
)
}
};
-
+
TU_MATCHA( (m_branches), (e),
(Unset,
H::simplify_branch(m_default);
@@ -2742,7 +2742,7 @@ void DecisionTreeNode::simplify()
}
)
)
-
+
H::simplify_branch(m_default);
}
@@ -2762,7 +2762,7 @@ void DecisionTreeNode::propagate_default()
)
}
};
-
+
TU_MATCHA( (m_branches), (e),
(Unset,
),
@@ -2812,7 +2812,7 @@ void DecisionTreeNode::propagate_default()
DEBUG("- default");
TU_IFLET(Branch, m_default, Subtree, be,
be->propagate_default();
-
+
if( be->m_default.is_Unset() ) {
// Propagate default from value branches
TU_MATCHA( (m_branches), (e),
@@ -2869,7 +2869,7 @@ namespace {
// Terminal, no unify
}
}
-
+
template<typename T>
void unify_from_vals_range(::std::vector< ::std::pair<T, DecisionTreeNode::Branch>>& dst, const ::std::vector< ::std::pair<T, DecisionTreeNode::Branch>>& src)
{
@@ -2893,7 +2893,7 @@ namespace {
}
}
}
-
+
template<typename T>
void unify_from_vals_pt(::std::vector< ::std::pair<T, DecisionTreeNode::Branch>>& dst, const ::std::vector< ::std::pair<T, DecisionTreeNode::Branch>>& src)
{
@@ -2915,9 +2915,9 @@ namespace {
void DecisionTreeNode::unify_from(const Branch& b)
{
TRACE_FUNCTION_FR(*this << " with " << b, *this);
-
+
assert( b.is_Terminal() || b.is_Subtree() );
-
+
if( m_default.is_Unset() ) {
if( b.is_Terminal() ) {
m_default = clone(b);
@@ -2926,7 +2926,7 @@ void DecisionTreeNode::unify_from(const Branch& b)
m_default = clone(b.as_Subtree()->m_default);
}
}
-
+
if( b.is_Subtree() && b.as_Subtree()->m_branches.tag() != m_branches.tag() ) {
// Is this a bug, or expected (and don't unify in?)
DEBUG("TODO - Unify mismatched arms? - " << b.as_Subtree()->m_branches.tag_str() << " and " << m_branches.tag_str());
@@ -2936,7 +2936,7 @@ void DecisionTreeNode::unify_from(const Branch& b)
//if( b.is_Subtree() ) {
// ASSERT_BUG(Span(), this->m_field_path == b.as_Subtree()->m_field_path, "Unifying DTNs with mismatched paths - " << this->m_field_path << " != " << b.as_Subtree()->m_field_path);
//}
-
+
TU_MATCHA( (m_branches), (dst),
(Unset,
if( b.is_Subtree() ) {
@@ -2948,7 +2948,7 @@ void DecisionTreeNode::unify_from(const Branch& b)
),
(Bool,
auto* src = (b.is_Subtree() ? &b.as_Subtree()->m_branches.as_Bool() : nullptr);
-
+
unify_branch( dst.false_branch, (src ? src->false_branch : b) );
unify_branch( dst.true_branch , (src ? src->true_branch : b) );
),
@@ -3020,7 +3020,7 @@ void DecisionTreeNode::unify_from(const Branch& b)
if( should_unify_subtree ) {
auto& sb = b.as_Subtree()->m_branches;
ASSERT_BUG(Span(), sb.is_Slice(), "Unifying Slice with " << sb.tag_str());
-
+
const auto& src = sb.as_Slice();
unify_from_vals_pt(dst.fixed_arms, src.fixed_arms);
}
@@ -3113,7 +3113,7 @@ void DecisionTreeNode::unify_from(const Branch& b)
}
)
)
-
+
os << "* = " << x.m_default;
os << " }";
return os;
@@ -3132,13 +3132,13 @@ void DecisionTreeGen::generate_tree_code(
)
{
TRACE_FUNCTION_F("top_ty=" << top_ty << ", field_path_ofs=" << field_path_ofs << ", top_val=" << top_val << ", node=" << node);
-
+
::MIR::LValue val;
::HIR::TypeRef ty;
-
+
get_ty_and_val(sp, m_builder.resolve(), top_ty, top_val, node.m_field_path, field_path_ofs, ty, val);
DEBUG("ty = " << ty << ", val = " << val);
-
+
TU_MATCHA( (ty.m_data), (e),
(Infer, BUG(sp, "Ivar for in match type"); ),
(Diverge, BUG(sp, "Diverge in match type"); ),
@@ -3265,7 +3265,7 @@ void DecisionTreeGen::generate_branch(const DecisionTreeNode::Branch& branch, ::
else {
assert( branch.is_Subtree() );
const auto& subnode = *branch.as_Subtree();
-
+
cb(subnode);
}
}
@@ -3279,16 +3279,16 @@ void DecisionTreeGen::generate_branches_Signed(
)
{
auto default_block = m_builder.new_bb_unlinked();
-
+
// TODO: Convert into an integer switch w/ offset instead of chained comparisons
-
+
for( const auto& branch : branches )
{
auto next_block = (&branch == &branches.back() ? default_block : m_builder.new_bb_unlinked());
-
+
auto val_start = m_builder.lvalue_or_temp(sp, ty, ::MIR::Constant(branch.first.start));
auto val_end = (branch.first.end == branch.first.start ? val_start.clone() : m_builder.lvalue_or_temp(sp, ty, ::MIR::Constant(branch.first.end)));
-
+
auto cmp_gt_block = m_builder.new_bb_unlinked();
auto val_cmp_lt = m_builder.lvalue_or_temp(sp, ::HIR::TypeRef(::HIR::CoreType::Bool), ::MIR::RValue::make_BinOp({
val.clone(), ::MIR::eBinOp::LT, mv$(val_start)
@@ -3300,14 +3300,14 @@ void DecisionTreeGen::generate_branches_Signed(
val.clone(), ::MIR::eBinOp::GT, mv$(val_end)
}) );
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
-
+
m_builder.set_cur_block( success_block );
this->generate_branch(branch.second, and_then);
-
+
m_builder.set_cur_block( next_block );
}
assert( m_builder.block_active() );
-
+
if( default_branch.is_Unset() ) {
// TODO: Emit error if non-exhaustive
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
@@ -3326,16 +3326,16 @@ void DecisionTreeGen::generate_branches_Unsigned(
)
{
auto default_block = m_builder.new_bb_unlinked();
-
+
// TODO: Convert into an integer switch w/ offset instead of chained comparisons
-
+
for( const auto& branch : branches )
{
auto next_block = (&branch == &branches.back() ? default_block : m_builder.new_bb_unlinked());
-
+
auto val_start = m_builder.lvalue_or_temp(sp, ty, ::MIR::Constant(branch.first.start));
auto val_end = (branch.first.end == branch.first.start ? val_start.clone() : m_builder.lvalue_or_temp(sp, ty, ::MIR::Constant(branch.first.end)));
-
+
auto cmp_gt_block = m_builder.new_bb_unlinked();
auto val_cmp_lt = m_builder.lvalue_or_temp(sp, ::HIR::TypeRef(::HIR::CoreType::Bool), ::MIR::RValue::make_BinOp({
val.clone(), ::MIR::eBinOp::LT, mv$(val_start)
@@ -3347,14 +3347,14 @@ void DecisionTreeGen::generate_branches_Unsigned(
val.clone(), ::MIR::eBinOp::GT, mv$(val_end)
}) );
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
-
+
m_builder.set_cur_block( success_block );
this->generate_branch(branch.second, and_then);
-
+
m_builder.set_cur_block( next_block );
}
assert( m_builder.block_active() );
-
+
if( default_branch.is_Unset() ) {
// TODO: Emit error if non-exhaustive
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
@@ -3373,14 +3373,14 @@ void DecisionTreeGen::generate_branches_Float(
)
{
auto default_block = m_builder.new_bb_unlinked();
-
+
for( const auto& branch : branches )
{
auto next_block = (&branch == &branches.back() ? default_block : m_builder.new_bb_unlinked());
-
+
auto val_start = m_builder.lvalue_or_temp(sp, ty, ::MIR::Constant(branch.first.start));
auto val_end = (branch.first.end == branch.first.start ? val_start.clone() : m_builder.lvalue_or_temp(sp, ty, ::MIR::Constant(branch.first.end)));
-
+
auto cmp_gt_block = m_builder.new_bb_unlinked();
auto val_cmp_lt = m_builder.lvalue_or_temp(sp, ::HIR::TypeRef(::HIR::CoreType::Bool), ::MIR::RValue::make_BinOp({
val.clone(), ::MIR::eBinOp::LT, mv$(val_start)
@@ -3392,14 +3392,14 @@ void DecisionTreeGen::generate_branches_Float(
val.clone(), ::MIR::eBinOp::GT, mv$(val_end)
}) );
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
-
+
m_builder.set_cur_block( success_block );
this->generate_branch(branch.second, and_then);
-
+
m_builder.set_cur_block( next_block );
}
assert( m_builder.block_active() );
-
+
if( default_branch.is_Unset() ) {
ERROR(sp, E0000, "Match over floating point with no `_` arm");
}
@@ -3417,16 +3417,16 @@ void DecisionTreeGen::generate_branches_Char(
)
{
auto default_block = m_builder.new_bb_unlinked();
-
+
// TODO: Convert into an integer switch w/ offset instead of chained comparisons
-
+
for( const auto& branch : branches )
{
auto next_block = (&branch == &branches.back() ? default_block : m_builder.new_bb_unlinked());
-
+
auto val_start = m_builder.lvalue_or_temp(sp, ty, ::MIR::Constant(branch.first.start));
auto val_end = (branch.first.end == branch.first.start ? val_start.clone() : m_builder.lvalue_or_temp(sp, ty, ::MIR::Constant(branch.first.end)));
-
+
auto cmp_gt_block = m_builder.new_bb_unlinked();
auto val_cmp_lt = m_builder.lvalue_or_temp( sp, ::HIR::TypeRef(::HIR::CoreType::Bool), ::MIR::RValue::make_BinOp({
val.clone(), ::MIR::eBinOp::LT, mv$(val_start)
@@ -3438,14 +3438,14 @@ void DecisionTreeGen::generate_branches_Char(
val.clone(), ::MIR::eBinOp::GT, mv$(val_end)
}) );
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
-
+
m_builder.set_cur_block( success_block );
this->generate_branch(branch.second, and_then);
-
+
m_builder.set_cur_block( next_block );
}
assert( m_builder.block_active() );
-
+
if( default_branch.is_Unset() ) {
// TODO: Error if not exhaustive.
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
@@ -3463,7 +3463,7 @@ void DecisionTreeGen::generate_branches_Bool(
)
{
//assert( ty.m_data.is_Boolean() );
-
+
if( default_branch.is_Unset() )
{
if( branches.false_branch.is_Unset() || branches.true_branch.is_Unset() ) {
@@ -3476,16 +3476,16 @@ void DecisionTreeGen::generate_branches_Bool(
// Unreachable default (NOTE: Not an error here)
}
}
-
+
// Emit an if based on the route taken
auto bb_false = m_builder.new_bb_unlinked();
auto bb_true = m_builder.new_bb_unlinked();
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val), bb_true, bb_false }) );
-
+
// Recurse into sub-patterns
const auto& branch_false = ( !branches.false_branch.is_Unset() ? branches.false_branch : default_branch );
const auto& branch_true = ( !branches. true_branch.is_Unset() ? branches. true_branch : default_branch );
-
+
m_builder.set_cur_block(bb_true );
this->generate_branch(branch_true , and_then);
m_builder.set_cur_block(bb_false);
@@ -3505,35 +3505,35 @@ void DecisionTreeGen::generate_branches_Borrow_str(
// - rustc emits calls to PartialEq::eq for this and for slices. mrustc could use PartialOrd and fall back to PartialEq if unavailable?
// > Requires crate access here! - A memcmp call is probably better, probably via a binop
// NOTE: The below implementation gets the final codegen to call memcmp on the strings by emitting eBinOp::{LT,GT}
-
+
// - Remove the wrapping Deref (which must be there)
ASSERT_BUG(sp, val.is_Deref(), "Match over str without a deref - " << val);
auto tmp = mv$( *val.as_Deref().val );
val = mv$(tmp);
-
+
auto default_bb = m_builder.new_bb_unlinked();
-
+
assert( !branches.empty() );
for(const auto& branch : branches)
{
auto have_val = val.clone();
-
+
auto next_bb = (&branch == &branches.back() ? default_bb : m_builder.new_bb_unlinked());
-
+
auto test_val = m_builder.lvalue_or_temp(sp, ::HIR::TypeRef::new_borrow(::HIR::BorrowType::Shared, ::HIR::CoreType::Str), ::MIR::Constant(branch.first) );
auto cmp_gt_bb = m_builder.new_bb_unlinked();
-
+
auto lt_val = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ have_val.clone(), ::MIR::eBinOp::LT, test_val.clone() }) );
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(lt_val), default_bb, cmp_gt_bb }) );
m_builder.set_cur_block(cmp_gt_bb);
-
+
auto eq_bb = m_builder.new_bb_unlinked();
auto gt_val = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Bool, ::MIR::RValue::make_BinOp({ mv$(have_val), ::MIR::eBinOp::GT, test_val.clone() }) );
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(gt_val), next_bb, eq_bb }) );
m_builder.set_cur_block(eq_bb);
-
+
this->generate_branch(branch.second, and_then);
-
+
m_builder.set_cur_block(next_bb);
}
this->generate_branch(default_branch, and_then);
@@ -3553,7 +3553,7 @@ void DecisionTreeGen::generate_branches_Enum(
const auto& variants = enum_ref.m_variants;
auto variant_count = variants.size();
bool has_any = ! default_branch.is_Unset();
-
+
if( branches.size() < variant_count && ! has_any ) {
ERROR(sp, E0000, "Non-exhaustive match over " << ty << " - " << branches.size() << " out of " << variant_count << " present");
}
@@ -3561,9 +3561,9 @@ void DecisionTreeGen::generate_branches_Enum(
//if( branches.size() == variant_count && has_any ) {
// ERROR(sp, E0000, "Unreachable _ arm - " << branches.size() << " variants in " << enum_path);
//}
-
+
auto any_block = (has_any ? m_builder.new_bb_unlinked() : 0);
-
+
// Emit a switch over the variant
::std::vector< ::MIR::BasicBlockId> variant_blocks;
variant_blocks.reserve( variant_count );
@@ -3583,22 +3583,22 @@ void DecisionTreeGen::generate_branches_Enum(
variant_blocks.resize( variant_count, any_block );
}
bool any_arm_used = ::std::any_of( variant_blocks.begin(), variant_blocks.end(), [any_block](const auto& blk){ return blk == any_block; } );
-
+
m_builder.end_block( ::MIR::Terminator::make_Switch({
val.clone(), variant_blocks // NOTE: Copies the list, so it can be used lower down
}) );
-
+
// Emit sub-patterns, looping over variants
for( const auto& branch : branches )
{
auto bb = variant_blocks[branch.first];
const auto& var = variants[branch.first];
DEBUG(branch.first << " " << var.first << " = " << branch.second);
-
+
auto var_lval = ::MIR::LValue::make_Downcast({ box$(val.clone()), branch.first });
-
+
::HIR::TypeRef fake_ty;
-
+
TU_MATCHA( (var.second), (e),
(Unit,
DEBUG("- Unit");
@@ -3628,7 +3628,7 @@ void DecisionTreeGen::generate_branches_Enum(
DEBUG("- Struct - " << fake_ty);
)
)
-
+
m_builder.set_cur_block( bb );
if( fake_ty == ::HIR::TypeRef() || fake_ty.m_data.as_Tuple().size() == 0 ) {
this->generate_branch(branch.second, and_then);
@@ -3640,7 +3640,7 @@ void DecisionTreeGen::generate_branches_Enum(
});
}
}
-
+
if( any_arm_used )
{
DEBUG("_ = " << default_branch);
@@ -3668,46 +3668,46 @@ void DecisionTreeGen::generate_branches_Slice(
if( default_branch.is_Unset() ) {
ERROR(sp, E0000, "Non-exhaustive match over " << ty);
}
-
+
// NOTE: Un-deref the slice
ASSERT_BUG(sp, val.is_Deref(), "slice matches must be passed a deref");
auto tmp = mv$( *val.as_Deref().val );
val = mv$(tmp);
-
+
auto any_block = m_builder.new_bb_unlinked();
-
+
// TODO: Select one of three ways of picking the arm:
// - Integer switch (unimplemented)
// - Binary search
// - Sequential comparisons
-
+
auto val_len = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::RValue::make_DstMeta({ val.clone() }));
-
+
// TODO: Binary search instead.
for( const auto& branch : branches.fixed_arms )
{
auto val_des = m_builder.lvalue_or_temp(sp, ::HIR::CoreType::Usize, ::MIR::Constant(static_cast<uint64_t>(branch.first)));
-
+
// Special case - final just does equality
if( &branch == &branches.fixed_arms.back() )
{
auto val_cmp_eq = m_builder.lvalue_or_temp( sp, ::HIR::TypeRef(::HIR::CoreType::Bool), ::MIR::RValue::make_BinOp({
val_len.clone(), ::MIR::eBinOp::EQ, mv$(val_des)
}) );
-
+
auto success_block = m_builder.new_bb_unlinked();
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_eq), any_block, success_block }) );
-
+
m_builder.set_cur_block( success_block );
this->generate_branch(branch.second, and_then);
-
+
m_builder.set_cur_block( any_block );
}
// TODO: Special case for zero (which can't have a LT)
else
{
auto next_block = m_builder.new_bb_unlinked();
-
+
auto cmp_gt_block = m_builder.new_bb_unlinked();
auto val_cmp_lt = m_builder.lvalue_or_temp( sp, ::HIR::TypeRef(::HIR::CoreType::Bool), ::MIR::RValue::make_BinOp({
val_len.clone(), ::MIR::eBinOp::LT, val_des.clone()
@@ -3719,15 +3719,15 @@ void DecisionTreeGen::generate_branches_Slice(
val_len.clone(), ::MIR::eBinOp::GT, mv$(val_des)
}) );
m_builder.end_block( ::MIR::Terminator::make_If({ mv$(val_cmp_gt), next_block, success_block }) );
-
+
m_builder.set_cur_block( success_block );
this->generate_branch(branch.second, and_then);
-
+
m_builder.set_cur_block( next_block );
}
}
assert( m_builder.block_active() );
-
+
if( default_branch.is_Unset() ) {
// TODO: Emit error if non-exhaustive
m_builder.end_block( ::MIR::Terminator::make_Diverge({}) );
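For context, the NOTE at the top of the string-match lowering above says the emitted eBinOp::{LT,GT} comparisons end up as memcmp calls in the final codegen. Below is a minimal standalone sketch of that comparison ladder, assuming the arms are sorted ascending and using plain std::string comparisons in place of the generated basic blocks; match_str and the sample arm values are invented for illustration and are not part of this diff.

#include <iostream>
#include <string>
#include <vector>

// have < arm  -> the If edge into default_bb
// have > arm  -> the If edge into the next arm's block
// otherwise   -> eq_bb, i.e. this arm's branch body
static int match_str(const std::string& have, const std::vector<std::string>& arms)
{
    for(std::size_t i = 0; i < arms.size(); i ++)
    {
        if( have < arms[i] )    // ::MIR::eBinOp::LT
            return -1;          // default branch
        if( have > arms[i] )    // ::MIR::eBinOp::GT
            continue;           // try the next arm
        return static_cast<int>(i);
    }
    return -1;  // fell past the last arm: default branch
}

int main()
{
    const std::vector<std::string> arms = { "bar", "foo", "quux" };
    std::cout << match_str("foo", arms) << "\n";    // 1
    std::cout << match_str("zzz", arms) << "\n";    // -1 (default)
}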
diff --git a/src/mir/helpers.cpp b/src/mir/helpers.cpp
index dc149cd2..027c68b7 100644
--- a/src/mir/helpers.cpp
+++ b/src/mir/helpers.cpp
@@ -206,7 +206,7 @@ const ::HIR::TypeRef& ::MIR::TypeResolve::get_lvalue_type(::HIR::TypeRef& tmp, c
MIR_ASSERT(*this, e.variant_index < unm.m_variants.size(), "Variant index out of range");
const auto& variant = unm.m_variants[e.variant_index];
const auto& var_ty = variant.second.ent;
-
+
if( monomorphise_type_needed(var_ty) ) {
tmp = monomorphise_type(sp, unm.m_params, te.path.m_data.as_Generic().m_params, variant.second.ent);
m_resolve.expand_associated_types(sp, tmp);
diff --git a/src/mir/helpers.hpp b/src/mir/helpers.hpp
index b0499ec8..40dbd4a3 100644
--- a/src/mir/helpers.hpp
+++ b/src/mir/helpers.hpp
@@ -40,7 +40,7 @@ public:
typedef ::std::vector< ::std::pair< ::HIR::Pattern, ::HIR::TypeRef> > args_t;
private:
const unsigned int STMT_TERM = ~0u;
-
+
public:
const Span& sp;
const ::StaticTraitResolve& m_resolve;
@@ -51,7 +51,7 @@ private:
const args_t& m_args;
const ::MIR::Function& m_fcn;
const ::HIR::SimplePath* m_lang_Box = nullptr;
-
+
unsigned int bb_idx = 0;
unsigned int stmt_idx = 0;
@@ -69,7 +69,7 @@ public:
m_lang_Box = &m_crate.m_lang_items.at("owned_box");
}
}
-
+
void set_cur_stmt(unsigned int bb_idx, unsigned int stmt_idx) {
this->bb_idx = bb_idx;
this->stmt_idx = stmt_idx;
@@ -78,7 +78,7 @@ public:
this->bb_idx = bb_idx;
this->stmt_idx = STMT_TERM;
}
-
+
void print_bug(::std::function<void(::std::ostream& os)> cb) const {
print_msg("ERROR", cb);
}
@@ -86,12 +86,12 @@ public:
print_msg("TODO", cb);
}
void print_msg(const char* tag, ::std::function<void(::std::ostream& os)> cb) const;
-
+
const ::MIR::BasicBlock& get_block(::MIR::BasicBlockId id) const;
-
+
const ::HIR::TypeRef& get_static_type(::HIR::TypeRef& tmp, const ::HIR::Path& path) const;
const ::HIR::TypeRef& get_lvalue_type(::HIR::TypeRef& tmp, const ::MIR::LValue& val) const;
-
+
const ::HIR::TypeRef* is_type_owned_box(const ::HIR::TypeRef& ty) const;
};
diff --git a/src/mir/mir.cpp b/src/mir/mir.cpp
index d7ae41cc..f7a5f3e8 100644
--- a/src/mir/mir.cpp
+++ b/src/mir/mir.cpp
@@ -50,7 +50,7 @@ namespace MIR {
)
return os;
}
-
+
::std::ostream& operator<<(::std::ostream& os, const LValue& x)
{
TU_MATCHA( (x), (e),
@@ -116,13 +116,13 @@ namespace MIR {
case ::MIR::eBinOp::SUB_OV: os << "SUB_OV"; break;
case ::MIR::eBinOp::MUL_OV: os << "MUL_OV"; break;
case ::MIR::eBinOp::DIV_OV: os << "DIV_OV"; break;
-
+
case ::MIR::eBinOp::BIT_OR : os << "BIT_OR" ; break;
case ::MIR::eBinOp::BIT_AND: os << "BIT_AND"; break;
case ::MIR::eBinOp::BIT_XOR: os << "BIT_XOR"; break;
case ::MIR::eBinOp::BIT_SHL: os << "BIT_SHL"; break;
case ::MIR::eBinOp::BIT_SHR: os << "BIT_SHR"; break;
-
+
case ::MIR::eBinOp::EQ: os << "EQ"; break;
case ::MIR::eBinOp::NE: os << "NE"; break;
case ::MIR::eBinOp::GT: os << "GT"; break;
@@ -159,7 +159,7 @@ namespace MIR {
)
return os;
}
-
+
::std::ostream& operator<<(::std::ostream& os, const Terminator& x)
{
TU_MATCHA( (x), (e),
diff --git a/src/mir/mir.hpp b/src/mir/mir.hpp
index 1ac8ff78..5a6b83de 100644
--- a/src/mir/mir.hpp
+++ b/src/mir/mir.hpp
@@ -66,14 +66,14 @@ enum class eBinOp
MUL, MUL_OV,
DIV, DIV_OV,
MOD,// MOD_OV,
-
+
BIT_OR,
BIT_AND,
BIT_XOR,
-
+
BIT_SHR,
BIT_SHL,
-
+
EQ, NE,
GT, GE,
LT, LE,
@@ -222,7 +222,7 @@ class Function
public:
::std::vector< ::HIR::TypeRef> named_variables;
::std::vector< ::HIR::TypeRef> temporaries;
-
+
::std::vector<BasicBlock> blocks;
};
diff --git a/src/mir/mir_builder.cpp b/src/mir/mir_builder.cpp
index 367c4aa1..cd7ddbcd 100644
--- a/src/mir/mir_builder.cpp
+++ b/src/mir/mir_builder.cpp
@@ -24,14 +24,14 @@ MirBuilder::MirBuilder(const Span& sp, const StaticTraitResolve& resolve, const
if( resolve.m_crate.m_lang_items.count("owned_box") > 0 ) {
m_lang_Box = &resolve.m_crate.m_lang_items.at("owned_box");
}
-
+
set_cur_block( new_bb_unlinked() );
m_scopes.push_back( ScopeDef { sp } );
m_scope_stack.push_back( 0 );
-
+
m_scopes.push_back( ScopeDef { sp, ScopeType::make_Temporaries({}) } );
m_scope_stack.push_back( 1 );
-
+
m_variable_states.resize( output.named_variables.size(), VarState::Uninit );
}
MirBuilder::~MirBuilder()
@@ -57,12 +57,12 @@ const ::HIR::TypeRef* MirBuilder::is_type_owned_box(const ::HIR::TypeRef& ty) co
return nullptr;
}
const auto& te = ty.m_data.as_Path();
-
+
if( ! te.path.m_data.is_Generic() ) {
return nullptr;
}
const auto& pe = te.path.m_data.as_Generic();
-
+
if( pe.m_path != *m_lang_Box ) {
return nullptr;
}
@@ -101,11 +101,11 @@ void MirBuilder::define_variable(unsigned int idx)
{
unsigned int rv = m_output.temporaries.size();
DEBUG("DEFINE tmp" << rv << ": " << ty);
-
+
m_output.temporaries.push_back( ty.clone() );
m_temporary_states.push_back( VarState::Uninit );
assert(m_output.temporaries.size() == m_temporary_states.size());
-
+
ScopeDef* top_scope = nullptr;
for(unsigned int i = m_scope_stack.size(); i --; )
{
@@ -179,7 +179,7 @@ void MirBuilder::push_stmt_assign(const Span& sp, ::MIR::LValue dst, ::MIR::RVal
ASSERT_BUG(sp, m_block_active, "Pushing statement with no active block");
ASSERT_BUG(sp, dst.tag() != ::MIR::LValue::TAGDEAD, "");
ASSERT_BUG(sp, val.tag() != ::MIR::RValue::TAGDEAD, "");
-
+
TU_MATCHA( (val), (e),
(Use,
this->moved_lvalue(sp, e);
@@ -248,7 +248,7 @@ void MirBuilder::push_stmt_assign(const Span& sp, ::MIR::LValue dst, ::MIR::RVal
this->moved_lvalue(sp, val);
)
)
-
+
// Drop target if populated
mark_value_assigned(sp, dst);
m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Assign({ mv$(dst), mv$(val) }) );
@@ -257,25 +257,25 @@ void MirBuilder::push_stmt_drop(const Span& sp, ::MIR::LValue val)
{
ASSERT_BUG(sp, m_block_active, "Pushing statement with no active block");
ASSERT_BUG(sp, val.tag() != ::MIR::LValue::TAGDEAD, "");
-
+
if( lvalue_is_copy(sp, val) ) {
// Don't emit a drop for Copy values
return ;
}
-
+
m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Drop({ ::MIR::eDropKind::DEEP, mv$(val) }) );
}
void MirBuilder::push_stmt_drop_shallow(const Span& sp, ::MIR::LValue val)
{
ASSERT_BUG(sp, m_block_active, "Pushing statement with no active block");
ASSERT_BUG(sp, val.tag() != ::MIR::LValue::TAGDEAD, "");
-
+
// TODO: Ensure that the type is a Box
//if( lvalue_is_copy(sp, val) ) {
// // Don't emit a drop for Copy values
// return ;
//}
-
+
m_output.blocks.at(m_current_block).statements.push_back( ::MIR::Statement::make_Drop({ ::MIR::eDropKind::SHALLOW, mv$(val) }) );
}
@@ -353,7 +353,7 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val)
while( scope_it != m_scope_stack.rend() )
{
auto& scope_def = m_scopes.at(*scope_it);
-
+
TU_IFLET( ScopeType, scope_def.data, Variables, e,
auto tmp_it = ::std::find( e.vars.begin(), e.vars.end(), idx );
if( tmp_it != e.vars.end() )
@@ -371,11 +371,11 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val)
return ;
}
++scope_it;
-
+
while( scope_it != m_scope_stack.rend() )
{
auto& scope_def = m_scopes.at(*scope_it);
-
+
TU_IFLET( ScopeType, scope_def.data, Variables, e,
e.vars.push_back( idx );
DEBUG("- to " << *scope_it);
@@ -383,7 +383,7 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val)
)
++scope_it;
}
-
+
DEBUG("- top");
),
(Temporary,
@@ -392,7 +392,7 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val)
while( scope_it != m_scope_stack.rend() )
{
auto& scope_def = m_scopes.at(*scope_it);
-
+
TU_IFLET( ScopeType, scope_def.data, Temporaries, e,
auto tmp_it = ::std::find( e.temporaries.begin(), e.temporaries.end(), idx );
if( tmp_it != e.temporaries.end() )
@@ -410,11 +410,11 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val)
return ;
}
++scope_it;
-
+
while( scope_it != m_scope_stack.rend() )
{
auto& scope_def = m_scopes.at(*scope_it);
-
+
TU_IFLET( ScopeType, scope_def.data, Temporaries, e,
e.temporaries.push_back( idx );
DEBUG("- to " << *scope_it);
@@ -422,7 +422,7 @@ void MirBuilder::raise_variables(const Span& sp, const ::MIR::LValue& val)
)
++scope_it;
}
-
+
DEBUG("- top");
)
)
@@ -571,42 +571,42 @@ void MirBuilder::terminate_scope(const Span& sp, ScopeHandle scope, bool emit_cl
BUG(sp, "Terminating scope not on the stack - scope " << scope.idx);
BUG(sp, "Terminating scope " << scope.idx << " when not at top of stack, " << (m_scope_stack.end() - it - 1) << " scopes in the way");
}
-
+
auto& scope_def = m_scopes.at(scope.idx);
ASSERT_BUG( sp, scope_def.complete == false, "Terminating scope which is already terminated" );
-
+
if( emit_cleanup )
{
// 2. Emit drops for all non-moved variables (share with below)
drop_scope_values(scope_def);
}
-
+
// 3. Pop scope (last because `drop_scope_values` uses the stack)
m_scope_stack.pop_back();
-
+
complete_scope(scope_def);
}
void MirBuilder::terminate_scope_early(const Span& sp, const ScopeHandle& scope)
{
DEBUG("EARLY scope " << scope.idx);
-
+
// 1. Ensure that this block is in the stack
auto it = ::std::find( m_scope_stack.begin(), m_scope_stack.end(), scope.idx );
if( it == m_scope_stack.end() ) {
BUG(sp, "Early-terminating scope not on the stack");
}
unsigned int slot = it - m_scope_stack.begin();
-
+
bool is_conditional = false;
for(unsigned int i = m_scope_stack.size(); i-- > slot; )
{
auto idx = m_scope_stack[i];
auto& scope_def = m_scopes.at( idx );
-
+
// If a conditional block is hit, prevent full termination of the rest
if( scope_def.data.is_Split() || scope_def.data.is_Loop() )
is_conditional = true;
-
+
if( !is_conditional ) {
DEBUG("Complete scope " << idx);
drop_scope_values(scope_def);
@@ -616,7 +616,7 @@ void MirBuilder::terminate_scope_early(const Span& sp, const ScopeHandle& scope)
else {
// Mark partial within this scope?
DEBUG("Drop part of scope " << idx);
-
+
// Emit drops for dropped values within this scope
drop_scope_values(scope_def);
// Inform the scope that it's been early-exited
@@ -633,9 +633,9 @@ void MirBuilder::end_split_arm(const Span& sp, const ScopeHandle& handle, bool r
ASSERT_BUG(sp, sd.data.is_Split(), "");
auto& sd_split = sd.data.as_Split();
ASSERT_BUG(sp, !sd_split.arms.empty(), "");
-
+
sd_split.arms.back().always_early_terminated = /*sd_split.arms.back().has_early_terminated &&*/ !reachable;
-
+
// HACK: If this arm's end is reachable, convert InnerMoved (shallow drop) variable states to Moved
// - I'm not 100% sure this is the correct place for calling drop.
if( reachable )
@@ -651,7 +651,7 @@ void MirBuilder::end_split_arm(const Span& sp, const ScopeHandle& handle, bool r
}
}
}
-
+
sd_split.arms.push_back( {} );
}
void MirBuilder::end_split_arm_early(const Span& sp)
@@ -666,13 +666,13 @@ void MirBuilder::end_split_arm_early(const Span& sp)
m_scope_stack.pop_back();
complete_scope(scope_def);
}
-
+
if( !m_scope_stack.empty() && m_scopes.at( m_scope_stack.back() ).data.is_Split() )
{
auto& sd = m_scopes[ m_scope_stack.back() ];
auto& sd_split = sd.data.as_Split();
sd_split.arms.back().has_early_terminated = true;
-
+
const auto& vss = sd_split.arms.back().var_states;
for(unsigned int i = 0; i < vss.size(); i ++ )
{
@@ -688,7 +688,7 @@ void MirBuilder::end_split_arm_early(const Span& sp)
void MirBuilder::complete_scope(ScopeDef& sd)
{
sd.complete = true;
-
+
TU_MATCHA( (sd.data), (e),
(Temporaries,
DEBUG("Temporaries " << e.temporaries);
@@ -702,16 +702,16 @@ void MirBuilder::complete_scope(ScopeDef& sd)
(Split,
)
)
-
+
// No macro for better debug output.
if( sd.data.is_Split() )
{
auto& e = sd.data.as_Split();
-
+
assert( e.arms.size() > 1 );
TRACE_FUNCTION_F("Split - " << (e.arms.size() - 1) << " arms");
e.arms.pop_back();
-
+
// Merge all arms and apply upwards
size_t var_count = 0;
size_t tmp_count = 0;
@@ -720,18 +720,18 @@ void MirBuilder::complete_scope(ScopeDef& sd)
var_count = ::std::max(var_count, arm.var_states.size());
tmp_count = ::std::max(tmp_count, arm.tmp_states.size());
}
-
+
struct StateMerger
{
::std::vector<bool> m_changed;
::std::vector<VarState> m_new_states;
-
+
StateMerger(size_t var_count):
m_changed(var_count),
m_new_states(var_count)
{
}
-
+
void merge_arm_state(const Span& sp, unsigned int i, bool has_changed, VarState new_state)
{
assert(i < this->m_new_states.size());
@@ -828,7 +828,7 @@ void MirBuilder::complete_scope(ScopeDef& sd)
}
}
};
-
+
StateMerger sm_var { var_count };
StateMerger sm_tmp { tmp_count };
for(const auto& arm : e.arms)
@@ -841,7 +841,7 @@ void MirBuilder::complete_scope(ScopeDef& sd)
{
sm_var.merge_arm_state(sd.span, i, arm.changed_var_states[i], arm.var_states[i]);
}
-
+
DEBUG(">TMP<");
assert( arm.changed_tmp_states.size() == arm.tmp_states.size() );
for(unsigned int i = 0; i < arm.tmp_states.size(); i ++ )
@@ -849,7 +849,7 @@ void MirBuilder::complete_scope(ScopeDef& sd)
sm_tmp.merge_arm_state(sd.span, i, arm.changed_tmp_states[i], arm.tmp_states[i]);
}
}
-
+
for(unsigned int i = 0; i < var_count; i ++ )
{
if( sm_var.m_changed[i] )
@@ -1083,7 +1083,7 @@ VarState MirBuilder::get_variable_state(const Span& sp, unsigned int idx) const
)
)
}
-
+
ASSERT_BUG(sp, idx < m_variable_states.size(), "Variable " << idx << " out of range for state table");
return m_variable_states[idx];
}
@@ -1114,7 +1114,7 @@ void MirBuilder::set_variable_state(const Span& sp, unsigned int idx, VarState s
)
)
}
-
+
ASSERT_BUG(sp, idx < m_variable_states.size(), "Variable " << idx << " out of range for state table");
m_variable_states[idx] = state;
}
@@ -1142,7 +1142,7 @@ VarState MirBuilder::get_temp_state(const Span& sp, unsigned int idx) const
}
}
}
-
+
ASSERT_BUG(sp, idx < m_temporary_states.size(), "Temporary " << idx << " out of range for state table");
return m_temporary_states[idx];
}
@@ -1173,7 +1173,7 @@ void MirBuilder::set_temp_state(const Span& sp, unsigned int idx, VarState state
return ;
}
}
-
+
ASSERT_BUG(sp, idx < m_temporary_states.size(), "Temporary " << idx << " out of range for state table");
m_temporary_states[idx] = state;
}
diff --git a/src/mir/mir_ptr.hpp b/src/mir/mir_ptr.hpp
index 583b155b..9133dd44 100644
--- a/src/mir/mir_ptr.hpp
+++ b/src/mir/mir_ptr.hpp
@@ -19,7 +19,7 @@ public:
FunctionPointer(): ptr(nullptr) {}
FunctionPointer(::MIR::Function* p): ptr(p) {}
FunctionPointer(FunctionPointer&& x): ptr(x.ptr) { x.ptr = nullptr; }
-
+
~FunctionPointer() {
reset();
}
@@ -29,14 +29,14 @@ public:
x.ptr = nullptr;
return *this;
}
-
+
void reset();
-
+
::MIR::Function* operator->() { return ptr; }
::MIR::Function& operator*() { return *ptr; }
const ::MIR::Function* operator->() const { return ptr; }
const ::MIR::Function& operator*() const { return *ptr; }
-
+
operator bool() const { return ptr != nullptr; }
};
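For context, a small standalone sketch of the idiom FunctionPointer uses above: reset() is only declared in the class body, presumably so ::MIR::Function can remain an incomplete type in this header, with the actual delete emitted where the full definition is visible. ToyPtr and Payload below are invented stand-ins, not part of this diff.

#include <iostream>
#include <utility>

struct Payload;     // incomplete here, like ::MIR::Function is in mir_ptr.hpp

class ToyPtr
{
    Payload* ptr;
public:
    ToyPtr(): ptr(nullptr) {}
    explicit ToyPtr(Payload* p): ptr(p) {}
    ToyPtr(ToyPtr&& x): ptr(x.ptr) { x.ptr = nullptr; }
    ToyPtr& operator=(ToyPtr&& x) { reset(); ptr = x.ptr; x.ptr = nullptr; return *this; }
    ~ToyPtr() { reset(); }

    void reset();   // declared only; the delete lives where Payload is complete

    operator bool() const { return ptr != nullptr; }
};

struct Payload { int value = 42; };

void ToyPtr::reset() { delete ptr; ptr = nullptr; }

int main()
{
    ToyPtr a( new Payload() );
    ToyPtr b( std::move(a) );
    std::cout << (a ? "a owns" : "a empty") << ", "
              << (b ? "b owns" : "b empty") << "\n";    // a empty, b owns
}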
diff --git a/src/mir/optimise.cpp b/src/mir/optimise.cpp
index 601b1b18..bb9536b0 100644
--- a/src/mir/optimise.cpp
+++ b/src/mir/optimise.cpp
@@ -31,7 +31,7 @@ namespace {
// Make sure we don't infinite loop
if( bb == target.terminator.as_Goto() )
return bb;
-
+
auto rv = get_new_target(state, target.terminator.as_Goto());
DEBUG(bb << " => " << rv);
return rv;
@@ -44,7 +44,7 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
static Span sp;
TRACE_FUNCTION_F(path);
::MIR::TypeResolve state { sp, resolve, FMT_CB(ss, ss << path;), ret_type, args, fcn };
-
+
// >> Replace targets that point to a block that is just a goto
for(auto& block : fcn.blocks)
{
@@ -74,7 +74,7 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
)
)
}
-
+
// >> Merge blocks where a block goto-s to a single-use block.
{
::std::vector<unsigned int> uses( fcn.blocks.size() );
@@ -106,7 +106,7 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
)
)
}
-
+
unsigned int i = 0;
for(auto& block : fcn.blocks)
{
@@ -116,9 +116,9 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
if( uses[tgt] != 1 )
break ;
DEBUG("Append bb " << tgt << " to bb" << i);
-
+
assert( &fcn.blocks[tgt] != &block );
-
+
for(auto& stmt : fcn.blocks[tgt].statements)
block.statements.push_back( mv$(stmt) );
block.terminator = mv$( fcn.blocks[tgt].terminator );
@@ -126,17 +126,17 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
i ++;
}
}
-
+
// >> Combine Duplicate Blocks
// TODO:
-
+
// >> Propagate dead assignments
// TODO: This requires knowing that doing so has no effect.
// - Can use little heuristics like a Call pointing to an assignment of its RV
// - Count the read/write count of a variable, if it's 1,1 then this optimisation is correct.
// - If the count is read=*,write=1 and the write is of an argument, replace with the argument.
-
+
// GC pass on blocks and variables
// - Find unused blocks, then delete and rewrite all references.
{
@@ -147,7 +147,7 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
{
auto bb = to_visit.back(); to_visit.pop_back();
visited[bb] = true;
-
+
const auto& block = fcn.blocks[bb];
TU_MATCHA( (block.terminator), (e),
(Incomplete,
@@ -181,7 +181,7 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
)
)
}
-
+
::std::vector<unsigned int> rewrite_table;
for(unsigned int i = 0, j = 0; i < fcn.blocks.size(); i ++)
{
@@ -192,7 +192,7 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
rewrite_table.push_back(~0u);
}
}
-
+
auto it = fcn.blocks.begin();
for(unsigned int i = 0; i < visited.size(); i ++)
{
@@ -230,7 +230,7 @@ void MIR_Optimise(const StaticTraitResolve& resolve, const ::HIR::ItemPath& path
e.panic_block = rewrite_table[e.panic_block];
)
)
-
+
++it;
}
}
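For context, a self-contained sketch of the block GC pass in the hunk above: mark blocks reachable from bb0, build the old-index to new-index rewrite_table (~0u marking dead blocks), then drop dead blocks and rewrite the surviving targets. It assumes a toy block type whose terminator is just a list of successor indices, whereas the real pass walks the ::MIR::Terminator variants and compacts the block list in place; gc_blocks and ToyBlock are invented names.

#include <cassert>
#include <iostream>
#include <utility>
#include <vector>

struct ToyBlock { std::vector<unsigned> succs; };

static void gc_blocks(std::vector<ToyBlock>& blocks)
{
    // 1. Mark every block reachable from the entry block (bb0).
    std::vector<bool> visited(blocks.size(), false);
    std::vector<unsigned> to_visit = { 0 };
    while( !to_visit.empty() )
    {
        auto bb = to_visit.back(); to_visit.pop_back();
        if( visited[bb] )
            continue;
        visited[bb] = true;
        for(auto tgt : blocks[bb].succs)
            to_visit.push_back(tgt);
    }

    // 2. Build the old-index -> new-index rewrite table (~0u marks a dead block).
    std::vector<unsigned> rewrite_table;
    for(unsigned i = 0, j = 0; i < blocks.size(); i ++)
        rewrite_table.push_back( visited[i] ? j ++ : ~0u );

    // 3. Drop the dead blocks and rewrite the surviving targets.
    std::vector<ToyBlock> new_blocks;
    for(unsigned i = 0; i < blocks.size(); i ++)
    {
        if( !visited[i] )
            continue;
        for(auto& tgt : blocks[i].succs)
        {
            tgt = rewrite_table[tgt];
            assert(tgt != ~0u && "live block points at a dead block");
        }
        new_blocks.push_back( std::move(blocks[i]) );
    }
    blocks = std::move(new_blocks);
}

int main()
{
    // bb0 -> bb1 -> bb3; bb2 is unreachable and gets dropped.
    std::vector<ToyBlock> blocks = { { {1} }, { {3} }, { {0} }, { {} } };
    gc_blocks(blocks);
    std::cout << blocks.size() << "\n";     // 3
}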
diff --git a/src/mir/visit_crate_mir.cpp b/src/mir/visit_crate_mir.cpp
index 4cd1fddc..ba4acf8e 100644
--- a/src/mir/visit_crate_mir.cpp
+++ b/src/mir/visit_crate_mir.cpp
@@ -36,7 +36,7 @@ void MIR::OuterVisitor::visit_function(::HIR::ItemPath p, ::HIR::Function& item)
DEBUG("Function code " << p);
// TODO: Get span without needing hir/expr.hpp
static Span sp;
-
+
// Replace ErasedType instances in `ret_type`
const auto& ret_type = item.m_return;
auto ret_type_v = clone_ty_with(sp, ret_type, [&](const auto& tpl, auto& rv) {
@@ -71,10 +71,10 @@ void MIR::OuterVisitor::visit_constant(::HIR::ItemPath p, ::HIR::Constant& item)
void MIR::OuterVisitor::visit_enum(::HIR::ItemPath p, ::HIR::Enum& item)
{
auto _ = this->m_resolve.set_item_generics(item.m_params);
-
+
// TODO: Use a different type depending on repr()
auto enum_type = ::HIR::TypeRef(::HIR::CoreType::Isize);
-
+
for(auto& var : item.m_variants)
{
TU_IFLET(::HIR::Enum::Variant, var.second, Value, e,
diff --git a/src/mir/visit_crate_mir.hpp b/src/mir/visit_crate_mir.hpp
index e6964545..d01f5ab3 100644
--- a/src/mir/visit_crate_mir.hpp
+++ b/src/mir/visit_crate_mir.hpp
@@ -24,9 +24,9 @@ public:
m_resolve(crate),
m_cb(cb)
{}
-
+
void visit_expr(::HIR::ExprPtr& exp) override;
-
+
void visit_type(::HIR::TypeRef& ty) override;
// ------
@@ -36,7 +36,7 @@ public:
void visit_static(::HIR::ItemPath p, ::HIR::Static& item) override;
void visit_constant(::HIR::ItemPath p, ::HIR::Constant& item) override;
void visit_enum(::HIR::ItemPath p, ::HIR::Enum& item) override;
-
+
// Boilerplate
void visit_trait(::HIR::ItemPath p, ::HIR::Trait& item) override;
void visit_type_impl(::HIR::TypeImpl& impl) override;