author     John Hodge <tpg@ucc.asn.au>   2018-06-03 14:57:05 +0800
committer  John Hodge <tpg@ucc.asn.au>   2018-06-03 14:57:05 +0800
commit     bf8f8b4b4a9fe273451be59f68acafbe61968b83 (patch)
tree       82993550cb3c88de0edbd55d79e4ea8e8cefffac /tools
parent     39b3cf53798683e496804f8322da2254b10850f4 (diff)
parent     a7fb27789a2b34543851d207120e2c0001ee9c27 (diff)
download   mrust-bf8f8b4b4a9fe273451be59f68acafbe61968b83.tar.gz
Merge branch 'master' of https://github.com/thepowersgang/mrustc
Diffstat (limited to 'tools')
-rw-r--r--  tools/common/debug.cpp                 |    8
-rw-r--r--  tools/common/debug.h                   |    1
-rw-r--r--  tools/common/target_detect.h           |   66
-rw-r--r--  tools/common/toml.cpp                  |    6
-rw-r--r--  tools/minicargo/Makefile               |    2
-rw-r--r--  tools/minicargo/build.cpp              |   48
-rw-r--r--  tools/minicargo/main.cpp               |   28
-rw-r--r--  tools/minicargo/manifest.cpp           |    2
-rw-r--r--  tools/standalone_miri/Makefile         |   44
-rw-r--r--  tools/standalone_miri/debug.cpp        |   35
-rw-r--r--  tools/standalone_miri/debug.hpp        |   18
-rw-r--r--  tools/standalone_miri/hir_sim.cpp      |  269
-rw-r--r--  tools/standalone_miri/hir_sim.hpp      |   43
-rw-r--r--  tools/standalone_miri/lex.cpp          |   29
-rw-r--r--  tools/standalone_miri/lex.hpp          |    1
-rw-r--r--  tools/standalone_miri/main.cpp         | 1971
-rw-r--r--  tools/standalone_miri/mir.cpp          |    1
-rw-r--r--  tools/standalone_miri/miri.cpp         | 2305
-rw-r--r--  tools/standalone_miri/miri.hpp         |   79
-rw-r--r--  tools/standalone_miri/module_tree.cpp  |  214
-rw-r--r--  tools/standalone_miri/module_tree.hpp  |   17
-rw-r--r--  tools/standalone_miri/value.cpp        |  445
-rw-r--r--  tools/standalone_miri/value.hpp        |  379
-rw-r--r--  tools/testrunner/main.cpp              |   29
24 files changed, 3629 insertions(+), 2411 deletions(-)
diff --git a/tools/common/debug.cpp b/tools/common/debug.cpp
index a3fb9956..94d8ed99 100644
--- a/tools/common/debug.cpp
+++ b/tools/common/debug.cpp
@@ -34,6 +34,14 @@ void Debug_DisablePhase(const char* phase_name)
{
gmDisabledDebug.insert( ::std::string(phase_name) );
}
+void Debug_EnablePhase(const char* phase_name)
+{
+ auto it = gmDisabledDebug.find(phase_name);
+ if( it != gmDisabledDebug.end() )
+ {
+ gmDisabledDebug.erase(it);
+ }
+}
void Debug_Print(::std::function<void(::std::ostream& os)> cb)
{
if( !Debug_IsEnabled() )
diff --git a/tools/common/debug.h b/tools/common/debug.h
index ace00876..86c88de9 100644
--- a/tools/common/debug.h
+++ b/tools/common/debug.h
@@ -7,6 +7,7 @@
typedef ::std::function<void(::std::ostream& os)> dbg_cb_t;
extern void Debug_SetPhase(const char* phase_name);
extern void Debug_DisablePhase(const char* phase_name);
+extern void Debug_EnablePhase(const char* phase_name);
extern bool Debug_IsEnabled();
extern void Debug_EnterScope(const char* name, dbg_cb_t );
extern void Debug_LeaveScope(const char* name, dbg_cb_t );
diff --git a/tools/common/target_detect.h b/tools/common/target_detect.h
new file mode 100644
index 00000000..995ab6a4
--- /dev/null
+++ b/tools/common/target_detect.h
@@ -0,0 +1,66 @@
+/*
+ * MRustC - Rust Compiler
+ * - By John Hodge (Mutabah/thePowersGang)
+ *
+ * common/target_detect.h
+ * - Auto-magical host target detection
+ */
+#pragma once
+
+// - Windows (MSVC)
+#ifdef _MSC_VER
+# if defined(_WIN64)
+# define DEFAULT_TARGET_NAME "x86_64-windows-msvc"
+# else
+# define DEFAULT_TARGET_NAME "x86-windows-msvc"
+# endif
+// - Linux
+#elif defined(__linux__)
+# if defined(__amd64__)
+# define DEFAULT_TARGET_NAME "x86_64-linux-gnu"
+# elif defined(__aarch64__)
+# define DEFAULT_TARGET_NAME "aarch64-linux-gnu"
+# elif defined(__arm__)
+# define DEFAULT_TARGET_NAME "arm-linux-gnu"
+# elif defined(__i386__)
+# define DEFAULT_TARGET_NAME "i586-linux-gnu"
+# else
+# warning "Unable to detect a suitable default target (linux-gnu)"
+# endif
+// - MinGW
+#elif defined(__MINGW32__)
+# if defined(_WIN64)
+# define DEFAULT_TARGET_NAME "x86_64-windows-gnu"
+# else
+# define DEFAULT_TARGET_NAME "i586-windows-gnu"
+# endif
+// - NetBSD
+#elif defined(__NetBSD__)
+# if defined(__amd64__)
+# define DEFAULT_TARGET_NAME "x86_64-unknown-netbsd"
+# else
+# warning "Unable to detect a suitable default target (NetBSD)"
+# endif
+// - OpenBSD
+#elif defined(__OpenBSD__)
+# if defined(__amd64__)
+# define DEFAULT_TARGET_NAME "x86_64-unknown-openbsd"
+# elif defined(__aarch64__)
+# define DEFAULT_TARGET_NAME "aarch64-unknown-openbsd"
+# elif defined(__arm__)
+# define DEFAULT_TARGET_NAME "arm-unknown-openbsd"
+# elif defined(__i386__)
+# define DEFAULT_TARGET_NAME "i686-unknown-openbsd"
+# else
+# warning "Unable to detect a suitable default target (OpenBSD)"
+# endif
+// - Apple devices
+#elif defined(__APPLE__)
+# define DEFAULT_TARGET_NAME "x86_64-apple-macosx"
+// - Unknown
+#else
+# warning "Unable to detect a suitable default target"
+#endif
+#ifndef DEFAULT_TARGET_NAME
+# define DEFAULT_TARGET_NAME ""
+#endif
diff --git a/tools/common/toml.cpp b/tools/common/toml.cpp
index 9fad0ec4..75a93810 100644
--- a/tools/common/toml.cpp
+++ b/tools/common/toml.cpp
@@ -170,9 +170,11 @@ TomlKeyValue TomlFile::get_next_value()
throw ::std::runtime_error(::format("Unexpected token after key - ", t));
t = Token::lex_from(m_if);
+ // --- Value ---
TomlKeyValue rv;
switch(t.m_type)
{
+ // String: Return the string value
case Token::Type::String:
rv.path = m_current_block;
rv.path.insert(rv.path.end(), m_current_composite.begin(), m_current_composite.end());
@@ -180,6 +182,7 @@ TomlKeyValue TomlFile::get_next_value()
rv.value = TomlValue { t.m_data };
break;
+ // Array: Parse the entire list and return as Type::List
case Token::Type::SquareOpen:
rv.path = m_current_block;
rv.path.insert(rv.path.end(), m_current_composite.begin(), m_current_composite.end());
@@ -193,7 +196,8 @@ TomlKeyValue TomlFile::get_next_value()
if( t.m_type == Token::Type::SquareClose )
break;
- // TODO: Recurse parse a value
+ // TODO: Recursively parse a value
+ // TODO: OR, support other value types
switch(t.m_type)
{
case Token::Type::String:
diff --git a/tools/minicargo/Makefile b/tools/minicargo/Makefile
index 01010fb5..363ef4b9 100644
--- a/tools/minicargo/Makefile
+++ b/tools/minicargo/Makefile
@@ -38,7 +38,7 @@ $(OBJDIR)%.o: %.cpp
@echo [CXX] $<
$V$(CXX) -o $@ -c $< $(CXXFLAGS) -MMD -MP -MF $@.dep
-../bin/common_lib.a:
+../bin/common_lib.a: $(wildcard ../common/*.* ../common/Makefile)
make -C ../common
-include $(OBJS:%.o=%.o.dep)
diff --git a/tools/minicargo/build.cpp b/tools/minicargo/build.cpp
index c7c07256..42e19552 100644
--- a/tools/minicargo/build.cpp
+++ b/tools/minicargo/build.cpp
@@ -42,22 +42,17 @@ extern int _putenv_s(const char*, const char*);
# include <sys/wait.h>
# include <fcntl.h>
#endif
+#ifdef __APPLE__
+# include <mach-o/dyld.h>
+#endif
#ifdef _WIN32
# define EXESUF ".exe"
-# ifdef _MSC_VER
-# define HOST_TARGET "x86_64-windows-msvc"
-# elif defined(__MINGW32__)
-# define HOST_TARGET "x86_64-windows-gnu"
-# else
-# endif
-#elif defined(__NetBSD__)
-# define EXESUF ""
-# define HOST_TARGET "x86_64-unknown-netbsd"
#else
# define EXESUF ""
-# define HOST_TARGET "x86_64-unknown-linux-gnu"
#endif
+#include <target_detect.h> // tools/common/target_detect.h
+#define HOST_TARGET DEFAULT_TARGET_NAME
/// Class abstracting access to the compiler
class Builder
@@ -549,12 +544,32 @@ Builder::Builder(BuildOptions opts):
#ifdef __MINGW32__
m_compiler_path = (minicargo_path / "..\\..\\bin\\mrustc.exe").normalise();
#else
+ // MSVC: minicargo and mrustc are in the same directory
m_compiler_path = minicargo_path / "mrustc.exe";
#endif
#else
char buf[1024];
- size_t s = readlink("/proc/self/exe", buf, sizeof(buf)-1);
- buf[s] = 0;
+# ifdef __linux__
+ ssize_t s = readlink("/proc/self/exe", buf, sizeof(buf)-1);
+ if(s >= 0)
+ {
+ buf[s] = 0;
+ }
+ else
+#elif defined(__APPLE__)
+ uint32_t s = sizeof(buf);
+ if( _NSGetExecutablePath(buf, &s) == 0 )
+ {
+ // Buffer populated
+ }
+ else
+ // TODO: Buffer too small
+#else
+# warning "Can't runtime determine path to minicargo"
+#endif
+ {
+ strcpy(buf, "tools/bin/minicargo");
+ }
::helpers::path minicargo_path { buf };
minicargo_path.pop_component();
@@ -803,7 +818,7 @@ bool Builder::build_target(const PackageManifest& manifest, const PackageTarget&
auto out_file = output_dir_abs / "build_" + manifest.name().c_str() + ".txt";
auto out_dir = output_dir_abs / "build_" + manifest.name().c_str();
-
+
bool run_build_script = false;
// TODO: Handle a pre-existing script containing `cargo:rerun-if-changed`
auto script_exe = this->build_build_script(manifest, is_for_host, &run_build_script);
@@ -813,7 +828,8 @@ bool Builder::build_target(const PackageManifest& manifest, const PackageTarget&
return ::helpers::path();
}
- if( run_build_script )
+ // If the script changed, OR the output file doesn't exist
+ if( run_build_script || Timestamp::for_file(out_file) == Timestamp::infinite_past() )
{
auto script_exe_abs = script_exe.to_absolute();
@@ -1007,8 +1023,8 @@ bool Builder::spawn_process(const char* exe_name, const StringList& args, const
if( posix_spawn(&pid, exe_name, &fa, /*attr=*/nullptr, (char* const*)argv.data(), (char* const*)envp.get_vec().data()) != 0 )
{
- perror("posix_spawn");
- DEBUG("Unable to spawn compiler");
+ ::std::cerr << "Unable to run process '" << exe_name << "' - " << strerror(errno) << ::std::endl;
+ DEBUG("Unable to spawn executable");
posix_spawn_file_actions_destroy(&fa);
return false;
}
diff --git a/tools/minicargo/main.cpp b/tools/minicargo/main.cpp
index 50e08619..b185881b 100644
--- a/tools/minicargo/main.cpp
+++ b/tools/minicargo/main.cpp
@@ -54,11 +54,29 @@ int main(int argc, const char* argv[])
return 1;
}
- Debug_DisablePhase("Load Repository");
- Debug_DisablePhase("Load Root");
- Debug_DisablePhase("Load Dependencies");
- Debug_DisablePhase("Enumerate Build");
- //Debug_DisablePhase("Run Build");
+ {
+ Debug_DisablePhase("Load Repository");
+ Debug_DisablePhase("Load Root");
+ Debug_DisablePhase("Load Dependencies");
+ Debug_DisablePhase("Enumerate Build");
+ Debug_DisablePhase("Run Build");
+
+ if( const char* e = getenv("MINICARGO_DEBUG") )
+ {
+ while( *e )
+ {
+ const char* colon = ::std::strchr(e, ':');
+ size_t len = colon ? colon - e : ::std::strlen(e);
+
+ Debug_EnablePhase(::std::string(e, len).c_str());
+
+ if( colon )
+ e = colon + 1;
+ else
+ e = e + len;
+ }
+ }
+ }
try
{
diff --git a/tools/minicargo/manifest.cpp b/tools/minicargo/manifest.cpp
index 6e2bf451..687c3e2a 100644
--- a/tools/minicargo/manifest.cpp
+++ b/tools/minicargo/manifest.cpp
@@ -9,7 +9,7 @@
#include <cctype> // toupper
#include "repository.h"
-
+// TODO: Extract this from the target at runtime
#ifdef _WIN32
# define TARGET_NAME "i586-windows-msvc"
# define CFG_UNIX false
diff --git a/tools/standalone_miri/Makefile b/tools/standalone_miri/Makefile
new file mode 100644
index 00000000..f4dc0d0d
--- /dev/null
+++ b/tools/standalone_miri/Makefile
@@ -0,0 +1,44 @@
+#
+# Standalone MIR Interpreter
+#
+ifeq ($(OS),Windows_NT)
+ EXESUF ?= .exe
+endif
+EXESUF ?=
+
+V ?= @
+
+OBJDIR := .obj/
+
+BIN := ../bin/standalone_miri$(EXESUF)
+OBJS := main.o debug.o mir.o lex.o value.o module_tree.o hir_sim.o miri.o
+
+LINKFLAGS := -g -lpthread
+CXXFLAGS := -Wall -std=c++14 -g -O2
+CXXFLAGS += -I ../common -I ../../src/include -I .
+CXXFLAGS += -Wno-misleading-indentation # Gets REALLY confused by the TU_ARM macro
+
+OBJS := $(OBJS:%=$(OBJDIR)%)
+
+.PHONY: all clean
+
+all: $(BIN)
+
+clean:
+ rm $(BIN) $(OBJS)
+
+$(BIN): $(OBJS) ../bin/common_lib.a
+ @mkdir -p $(dir $@)
+ @echo [CXX] -o $@
+ $V$(CXX) -o $@ $(OBJS) ../bin/common_lib.a $(LINKFLAGS)
+
+$(OBJDIR)%.o: %.cpp
+ @mkdir -p $(dir $@)
+ @echo [CXX] $<
+ $V$(CXX) -o $@ -c $< $(CXXFLAGS) -MMD -MP -MF $@.dep
+
+../bin/common_lib.a:
+ make -C ../common
+
+-include $(OBJS:%.o=%.o.dep)
+
diff --git a/tools/standalone_miri/debug.cpp b/tools/standalone_miri/debug.cpp
index 415bc5d5..c49df960 100644
--- a/tools/standalone_miri/debug.cpp
+++ b/tools/standalone_miri/debug.cpp
@@ -1,6 +1,15 @@
+/*
+ * mrustc Standalone MIRI
+ * - by John Hodge (Mutabah)
+ *
+ * debug.cpp
+ * - Interpreter debug logging
+ */
#include "debug.hpp"
+#include <fstream>
unsigned DebugSink::s_indent = 0;
+::std::unique_ptr<std::ofstream> DebugSink::s_out_file;
DebugSink::DebugSink(::std::ostream& inner):
m_inner(inner)
@@ -9,6 +18,11 @@ DebugSink::DebugSink(::std::ostream& inner):
DebugSink::~DebugSink()
{
m_inner << "\n";
+ m_inner.flush();
+}
+void DebugSink::set_output_file(const ::std::string& s)
+{
+ s_out_file.reset(new ::std::ofstream(s));
}
bool DebugSink::enabled(const char* fcn_name)
{
@@ -16,33 +30,34 @@ bool DebugSink::enabled(const char* fcn_name)
}
DebugSink DebugSink::get(const char* fcn_name, const char* file, unsigned line, DebugLevel lvl)
{
+ auto& sink = s_out_file ? *s_out_file : ::std::cout;
for(size_t i = s_indent; i--;)
- ::std::cout << " ";
+ sink << " ";
switch(lvl)
{
case DebugLevel::Trace:
- ::std::cout << "Trace: " << file << ":" << line << ": ";
+ sink << "Trace: " << file << ":" << line << ": ";
break;
case DebugLevel::Debug:
- ::std::cout << "DEBUG: " << fcn_name << ": ";
+ sink << "DEBUG: " << fcn_name << ": ";
break;
case DebugLevel::Notice:
- ::std::cout << "NOTE: ";
+ sink << "NOTE: ";
break;
case DebugLevel::Warn:
- ::std::cout << "WARN: ";
+ sink << "WARN: ";
break;
case DebugLevel::Error:
- ::std::cout << "ERROR: ";
+ sink << "ERROR: ";
break;
case DebugLevel::Fatal:
- ::std::cout << "FATAL: ";
+ sink << "FATAL: ";
break;
case DebugLevel::Bug:
- ::std::cout << "BUG: " << file << ":" << line << ": ";
+ sink << "BUG: " << file << ":" << line << ": ";
break;
}
- return DebugSink(::std::cout);
+ return DebugSink(sink);
}
void DebugSink::inc_indent()
{
@@ -51,4 +66,4 @@ void DebugSink::inc_indent()
void DebugSink::dec_indent()
{
s_indent --;
-}
\ No newline at end of file
+}
diff --git a/tools/standalone_miri/debug.hpp b/tools/standalone_miri/debug.hpp
index 5afad96e..b3b0d76f 100644
--- a/tools/standalone_miri/debug.hpp
+++ b/tools/standalone_miri/debug.hpp
@@ -1,10 +1,15 @@
-//
-//
-//
+/*
+ * mrustc Standalone MIRI
+ * - by John Hodge (Mutabah)
+ *
+ * debug.hpp
+ * - Interpreter debug logging
+ */
#pragma once
#include <iostream>
#include <functional>
+#include <memory>
enum class DebugLevel {
Trace,
@@ -19,6 +24,7 @@ enum class DebugLevel {
class DebugSink
{
static unsigned s_indent;
+ static ::std::unique_ptr<std::ofstream> s_out_file;
::std::ostream& m_inner;
DebugSink(::std::ostream& inner);
public:
@@ -27,6 +33,7 @@ public:
template<typename T>
::std::ostream& operator<<(const T& v) { return m_inner << v; }
+ static void set_output_file(const ::std::string& s);
static bool enabled(const char* fcn_name);
static DebugSink get(const char* fcn_name, const char* file, unsigned line, DebugLevel lvl);
// TODO: Add a way to insert an annotation before/after an abort/warning/... that indicates what input location caused it.
@@ -75,14 +82,14 @@ FunctionTrace<T,U> FunctionTrace_d(const char* fname, const char* file, unsigned
struct DebugExceptionTodo:
public ::std::exception
{
- const char* what() const {
+ const char* what() const noexcept override {
return "TODO hit";
}
};
struct DebugExceptionError:
public ::std::exception
{
- const char* what() const {
+ const char* what() const noexcept override {
return "error";
}
};
@@ -90,6 +97,7 @@ struct DebugExceptionError:
#define TRACE_FUNCTION_R(entry, exit) auto ftg##__LINE__ = FunctionTrace_d(__FUNCTION__,__FILE__,__LINE__,[&](DebugSink& FunctionTrace_ss){FunctionTrace_ss << entry;}, [&](DebugSink& FunctionTrace_ss) {FunctionTrace_ss << exit;} )
#define LOG_TRACE(strm) do { if(DebugSink::enabled(__FUNCTION__)) DebugSink::get(__FUNCTION__,__FILE__,__LINE__,DebugLevel::Trace) << strm; } while(0)
#define LOG_DEBUG(strm) do { if(DebugSink::enabled(__FUNCTION__)) DebugSink::get(__FUNCTION__,__FILE__,__LINE__,DebugLevel::Debug) << strm; } while(0)
+#define LOG_NOTICE(strm) do { DebugSink::get(__FUNCTION__,__FILE__,__LINE__,DebugLevel::Notice) << strm; } while(0)
#define LOG_ERROR(strm) do { DebugSink::get(__FUNCTION__,__FILE__,__LINE__,DebugLevel::Error) << strm; throw DebugExceptionError{}; } while(0)
#define LOG_FATAL(strm) do { DebugSink::get(__FUNCTION__,__FILE__,__LINE__,DebugLevel::Fatal) << strm; exit(1); } while(0)
#define LOG_TODO(strm) do { DebugSink::get(__FUNCTION__,__FILE__,__LINE__,DebugLevel::Bug) << "TODO: " << strm; throw DebugExceptionTodo{}; } while(0)
diff --git a/tools/standalone_miri/hir_sim.cpp b/tools/standalone_miri/hir_sim.cpp
index 604f65a4..88739730 100644
--- a/tools/standalone_miri/hir_sim.cpp
+++ b/tools/standalone_miri/hir_sim.cpp
@@ -1,7 +1,12 @@
-//
-//
-//
+/*
+ * mrustc Standalone MIRI
+ * - by John Hodge (Mutabah)
+ *
+ * hir_sim.cpp
+ * - Copy of the various HIR types from the compiler
+ */
#include <iostream>
+#include <algorithm>
#include "hir_sim.hpp"
#include "module_tree.hpp"
@@ -13,13 +18,65 @@
size_t HIR::TypeRef::get_size(size_t ofs) const
{
- if( this->wrappers.size() <= ofs )
+ if( const auto* w = this->get_wrapper(ofs) )
+ {
+ switch(w->type)
+ {
+ case TypeWrapper::Ty::Array:
+ return this->get_size(1) * w->size;
+ case TypeWrapper::Ty::Borrow:
+ case TypeWrapper::Ty::Pointer:
+ if( const auto* next_w = this->get_wrapper(ofs+1) )
+ {
+ if( next_w->type == TypeWrapper::Ty::Slice )
+ {
+ return POINTER_SIZE*2;
+ }
+ else
+ {
+ return POINTER_SIZE;
+ }
+ }
+ else
+ {
+ // Need to look up the metadata type for the actual type
+ if( this->inner_type == RawType::Composite )
+ {
+ if( this->composite_type->dst_meta == RawType::Unreachable )
+ {
+ return POINTER_SIZE;
+ }
+ // Special case: extern types (which appear when a type is only ever used by pointer)
+ if( this->composite_type->dst_meta == RawType::Unit )
+ {
+ return POINTER_SIZE;
+ }
+
+ // TODO: Ideally, this inner type wouldn't be unsized itself... but checking that would be interesting.
+ return POINTER_SIZE + this->composite_type->dst_meta.get_size();
+ }
+ else if( this->inner_type == RawType::Str )
+ return POINTER_SIZE*2;
+ else if( this->inner_type == RawType::TraitObject )
+ return POINTER_SIZE*2;
+ else
+ {
+ return POINTER_SIZE;
+ }
+ }
+ case TypeWrapper::Ty::Slice:
+ LOG_BUG("Getting size of a slice - " << *this);
+ }
+ throw "";
+ }
+ else
{
switch(this->inner_type)
{
case RawType::Unit:
return 0;
case RawType::Composite:
+ // NOTE: Don't care if the type has metadata
return this->composite_type->size;
case RawType::Unreachable:
LOG_BUG("Attempting to get size of an unreachable type, " << *this);
@@ -53,92 +110,88 @@ size_t HIR::TypeRef::get_size(size_t ofs) const
}
throw "";
}
-
- switch(this->wrappers[ofs].type)
- {
- case TypeWrapper::Ty::Array:
- return this->get_size(1) * this->wrappers[ofs].size;
- case TypeWrapper::Ty::Borrow:
- case TypeWrapper::Ty::Pointer:
- if( this->wrappers.size() == ofs+1 )
- {
- // Need to look up the metadata type for the actual type
- if( this->inner_type == RawType::Composite )
- {
- if( this->composite_type->dst_meta == RawType::Unreachable )
- {
- return POINTER_SIZE;
- }
- // Special case: extern types (which appear when a type is only ever used by pointer)
- if( this->composite_type->dst_meta == RawType::Unit )
- {
- return POINTER_SIZE;
- }
-
- // TODO: Ideally, this inner type wouldn't be unsized itself... but checking that would be interesting.
- return POINTER_SIZE + this->composite_type->dst_meta.get_size();
- }
- else if( this->inner_type == RawType::Str )
- return POINTER_SIZE*2;
- else if( this->inner_type == RawType::TraitObject )
- return POINTER_SIZE*2;
- else
- {
- return POINTER_SIZE;
- }
- }
- else if( this->wrappers[ofs+1].type == TypeWrapper::Ty::Slice )
- {
- return POINTER_SIZE*2;
- }
- else
- {
- return POINTER_SIZE;
- }
- case TypeWrapper::Ty::Slice:
- LOG_BUG("Getting size of a slice - " << *this);
- }
- throw "";
}
-bool HIR::TypeRef::has_slice_meta() const
+bool HIR::TypeRef::has_slice_meta(size_t& out_inner_size) const
{
- if( this->wrappers.size() == 0 )
+ if( const auto* w = this->get_wrapper() )
+ {
+ out_inner_size = this->get_size(1);
+ return (w->type == TypeWrapper::Ty::Slice);
+ }
+ else
{
if(this->inner_type == RawType::Composite)
{
- // TODO: Handle metadata better
+ // TODO: This type could be wrapping a slice, needs to return the inner type size.
+ // - Also need to know which field is the unsized one
return false;
}
else
{
+ out_inner_size = 1;
return (this->inner_type == RawType::Str);
}
}
- else
- {
- return (this->wrappers[0].type == TypeWrapper::Ty::Slice);
- }
}
HIR::TypeRef HIR::TypeRef::get_inner() const
{
if( this->wrappers.empty() )
{
- throw "ERROR";
+ LOG_ERROR("Getting inner of a non-wrapped type - " << *this);
+ }
+ else
+ {
+ auto ity = *this;
+ ity.wrappers.erase(ity.wrappers.begin());
+ return ity;
}
- auto ity = *this;
- ity.wrappers.erase(ity.wrappers.begin());
- return ity;
}
-HIR::TypeRef HIR::TypeRef::wrap(TypeWrapper::Ty ty, size_t size) const
+HIR::TypeRef HIR::TypeRef::wrap(TypeWrapper::Ty ty, size_t size)&&
{
- auto rv = *this;
+ auto rv = ::std::move(*this);
rv.wrappers.insert(rv.wrappers.begin(), { ty, size });
return rv;
}
-const HIR::TypeRef* HIR::TypeRef::get_usized_type(size_t& running_inner_size) const
+bool HIR::TypeRef::has_pointer() const
{
- if( this->wrappers.empty() )
+ // If ALL of the wrappers (of which there may be none) are Array, look deeper
+ // - Don't need to worry about unsized types here
+ if( ::std::all_of(this->wrappers.begin(), this->wrappers.end(), [](const auto& x){ return x.type == TypeWrapper::Ty::Array; }) )
+ {
+ // TODO: Function pointers should be _pointers_
+ if( this->inner_type == RawType::Function )
+ {
+ return true;
+ }
+ // Check the inner type
+ if( this->inner_type == RawType::Composite )
+ {
+ // Still not sure, check the inner for any pointers.
+ for(const auto& fld : this->composite_type->fields)
+ {
+ if( fld.second.has_pointer() )
+ return true;
+ }
+ }
+ return false;
+ }
+ return true;
+}
+const HIR::TypeRef* HIR::TypeRef::get_unsized_type(size_t& running_inner_size) const
+{
+ if( const auto* w = this->get_wrapper() )
+ {
+ if( w->type == TypeWrapper::Ty::Slice )
+ {
+ return this;
+ }
+ else
+ {
+ return nullptr;
+ }
+ }
+ else
{
switch(this->inner_type)
{
@@ -149,7 +202,7 @@ const HIR::TypeRef* HIR::TypeRef::get_usized_type(size_t& running_inner_size) co
return nullptr;
running_inner_size = this->composite_type->fields.back().first;
size_t tmp;
- return this->composite_type->fields.back().second.get_usized_type(tmp);
+ return this->composite_type->fields.back().second.get_unsized_type(tmp);
case RawType::TraitObject:
case RawType::Str:
return this;
@@ -157,47 +210,60 @@ const HIR::TypeRef* HIR::TypeRef::get_usized_type(size_t& running_inner_size) co
return nullptr;
}
}
- else if( this->wrappers[0].type == TypeWrapper::Ty::Slice )
+}
+HIR::TypeRef HIR::TypeRef::get_meta_type() const
+{
+ if( const auto* w = this->get_wrapper() )
{
- return this;
+ if( w->type == TypeWrapper::Ty::Slice )
+ {
+ return TypeRef(RawType::USize);
+ }
+ else
+ {
+ return TypeRef(RawType::Unreachable);
+ }
}
else
{
- return nullptr;
- }
-}
-const HIR::TypeRef* HIR::TypeRef::get_meta_type() const
-{
- static ::HIR::TypeRef static_usize = ::HIR::TypeRef(RawType::USize);
- if( this->wrappers.empty() )
- {
switch(this->inner_type)
{
case RawType::Composite:
if( this->composite_type->dst_meta == RawType::Unreachable )
- return nullptr;
- return &this->composite_type->dst_meta;
+ return TypeRef(RawType::Unreachable);
+ return this->composite_type->dst_meta;
case RawType::TraitObject:
- LOG_TODO("get_meta_type on TraitObject - " << *this);
+ return ::HIR::TypeRef(this->composite_type).wrap( TypeWrapper::Ty::Pointer, static_cast<size_t>(BorrowType::Shared) );
case RawType::Str:
- return &static_usize;
+ return TypeRef(RawType::USize);
default:
- return nullptr;
+ return TypeRef(RawType::Unreachable);
}
}
- else if( this->wrappers[0].type == TypeWrapper::Ty::Slice )
- {
- return &static_usize;
- }
- else
- {
- return nullptr;
- }
}
HIR::TypeRef HIR::TypeRef::get_field(size_t idx, size_t& ofs) const
{
- if( this->wrappers.empty() )
+ if( const auto* w = this->get_wrapper() )
+ {
+ if( w->type == TypeWrapper::Ty::Slice )
+ {
+ // TODO
+ throw "TODO";
+ }
+ else if( w->type == TypeWrapper::Ty::Array )
+ {
+ LOG_ASSERT(idx < w->size, "Getting field on array with OOB index - " << idx << " >= " << w->size << " - " << *this);
+ auto ity = this->get_inner();
+ ofs = ity.get_size() * idx;
+ return ity;
+ }
+ else
+ {
+ throw "ERROR";
+ }
+ }
+ else
{
if( this->inner_type == RawType::Composite )
{
@@ -211,21 +277,6 @@ HIR::TypeRef HIR::TypeRef::get_field(size_t idx, size_t& ofs) const
throw "ERROR";
}
}
- else if( this->wrappers.front().type == TypeWrapper::Ty::Slice )
- {
- // TODO
- throw "TODO";
- }
- else if( this->wrappers.front().type == TypeWrapper::Ty::Array )
- {
- auto ity = this->get_inner();
- ofs = ity.get_size() * idx;
- return ity;
- }
- else
- {
- throw "ERROR";
- }
}
size_t HIR::TypeRef::get_field_ofs(size_t base_idx, const ::std::vector<size_t>& other_idx, TypeRef& ty) const
{
@@ -305,7 +356,11 @@ namespace HIR {
os << "function_?";
break;
case RawType::TraitObject:
- os << "traitobject_?";
+ os << "dyn ";
+ if( x.composite_type )
+ os << x.composite_type->my_path;
+ else
+ os << "?";
break;
case RawType::Bool: os << "bool"; break;
case RawType::Char: os << "char"; break;
@@ -385,4 +440,4 @@ namespace HIR {
}
return os;
}
-}
\ No newline at end of file
+}
diff --git a/tools/standalone_miri/hir_sim.hpp b/tools/standalone_miri/hir_sim.hpp
index 7154de13..7730ac48 100644
--- a/tools/standalone_miri/hir_sim.hpp
+++ b/tools/standalone_miri/hir_sim.hpp
@@ -23,7 +23,7 @@ struct DataType;
enum class RawType
{
Unreachable,
- Function,
+ Function, // TODO: Needs a way of indicating the signature?
Unit,
Bool,
@@ -39,7 +39,7 @@ enum class RawType
Char, Str,
Composite, // Struct, Enum, Union, tuple, ...
- TraitObject, // Data pointer is `*const ()`, metadata type stored in `composite_type`
+ TraitObject, // Data pointer is `*const ()`, vtable type stored in `composite_type`
};
struct TypeWrapper
{
@@ -118,12 +118,41 @@ namespace HIR {
}
size_t get_size(size_t ofs=0) const;
- bool has_slice_meta() const; // The attached metadata is a count
- const TypeRef* get_usized_type(size_t& running_inner_size) const;
- const TypeRef* get_meta_type() const;
+
+ // Returns true if this (unsized) type is a wrapper around a slice
+ // - Fills `out_inner_size` with the size of the slice element
+ bool has_slice_meta(size_t& out_inner_size) const; // The attached metadata is a count of elements
+ // Returns the base unsized type for this type (returning nullptr if there's no unsized field)
+ // - Fills `running_inner_size` with the offset to the unsized field
+ const TypeRef* get_unsized_type(size_t& running_inner_size) const;
+ // Returns the type of associated metadata for this (unsized) type (or `!` if not unsized)
+ TypeRef get_meta_type() const;
+ // Get the inner type (one level of wrapping removed)
TypeRef get_inner() const;
- TypeRef wrap(TypeWrapper::Ty ty, size_t size) const;
+
+ // Add a wrapper over this type (moving)
+ TypeRef wrap(TypeWrapper::Ty ty, size_t size)&&;
+ // Add a wrapper over this type (copying)
+ TypeRef wrapped(TypeWrapper::Ty ty, size_t size) const {
+ return TypeRef(*this).wrap(ty, size);
+ }
+ // Get the wrapper at the provided offset (0 = outermost)
+ const TypeWrapper* get_wrapper(size_t ofs=0) const {
+ //assert(ofs <= this->wrappers.size());
+ if( ofs < this->wrappers.size() ) {
+ return &this->wrappers[ofs];
+ }
+ else {
+ return nullptr;
+ }
+ }
+
+ // Returns true if the type contains any pointers
+ bool has_pointer() const;
+ // Get the type and offset of the specified field index
TypeRef get_field(size_t idx, size_t& ofs) const;
+ // Get the offset and type of a field (recursing using `other_idx`)
+ size_t get_field_ofs(size_t idx, const ::std::vector<size_t>& other_idx, TypeRef& ty) const;
bool operator==(const RawType& x) const {
if( this->wrappers.size() != 0 )
@@ -149,8 +178,6 @@ namespace HIR {
return false;
}
- size_t get_field_ofs(size_t idx, const ::std::vector<size_t>& other_idx, TypeRef& ty) const;
-
friend ::std::ostream& operator<<(::std::ostream& os, const TypeRef& x);
};
diff --git a/tools/standalone_miri/lex.cpp b/tools/standalone_miri/lex.cpp
index 48a9e0cd..8fc77f7a 100644
--- a/tools/standalone_miri/lex.cpp
+++ b/tools/standalone_miri/lex.cpp
@@ -11,11 +11,11 @@ bool Token::operator==(TokenClass tc) const
}
bool Token::operator==(char c) const
{
- return this->strval.size() == 1 && this->strval[0] == c;
+ return (this->type == TokenClass::Ident || this->type == TokenClass::Symbol) && this->strval.size() == 1 && this->strval[0] == c;
}
bool Token::operator==(const char* s) const
{
- return this->strval == s;
+ return (this->type == TokenClass::Ident || this->type == TokenClass::Symbol) && this->strval == s;
}
uint64_t Token::integer() const
@@ -56,6 +56,9 @@ double Token::real() const
case TokenClass::ByteString:
os << "b\"" << x.strval << "\"";
break;
+ case TokenClass::Lifetime:
+ os << "'" << x.strval << "\"";
+ break;
}
return os;
}
@@ -95,6 +98,7 @@ Token Lexer::consume()
auto rv = ::std::move(m_cur);
advance();
+ //::std::cout << *this << "Lexer::consume " << rv << " -> " << m_cur << ::std::endl;
return rv;
}
@@ -349,6 +353,23 @@ void Lexer::advance()
auto val = this->parse_string();
m_cur = Token { TokenClass::String, ::std::move(val) };
}
+ else if( ch == '\'')
+ {
+ ::std::string val;
+ ch = m_if.get();
+ while( ch == '_' || ::std::isalnum(ch) )
+ {
+ val += ch;
+ ch = m_if.get();
+ }
+ m_if.unget();
+ if( val == "" )
+ {
+ ::std::cerr << *this << "Empty lifetime name";
+ throw "ERROR";
+ }
+ m_cur = Token { TokenClass::Lifetime, ::std::move(val) };
+ }
else
{
switch(ch)
@@ -390,6 +411,10 @@ void Lexer::advance()
{
m_cur = Token { TokenClass::Symbol, "<<" };
}
+ else if( ch == '=' )
+ {
+ m_cur = Token { TokenClass::Symbol, "<=" };
+ }
else
{
m_if.unget();
diff --git a/tools/standalone_miri/lex.hpp b/tools/standalone_miri/lex.hpp
index 95130111..cc1429f7 100644
--- a/tools/standalone_miri/lex.hpp
+++ b/tools/standalone_miri/lex.hpp
@@ -14,6 +14,7 @@ enum class TokenClass
Real,
String,
ByteString,
+ Lifetime,
};
struct Token
diff --git a/tools/standalone_miri/main.cpp b/tools/standalone_miri/main.cpp
index a75e753f..deed08be 100644
--- a/tools/standalone_miri/main.cpp
+++ b/tools/standalone_miri/main.cpp
@@ -1,28 +1,36 @@
-//
-//
-//
+/*
+ * mrustc Standalone MIRI
+ * - by John Hodge (Mutabah)
+ *
+ * main.cpp
+ * - Program entrypoint
+ */
#include <iostream>
#include "module_tree.hpp"
#include "value.hpp"
#include <algorithm>
#include <iomanip>
#include "debug.hpp"
-#ifdef _WIN32
-# define NOMINMAX
-# include <Windows.h>
-#endif
+#include "miri.hpp"
+
struct ProgramOptions
{
::std::string infile;
+ //TODO: Architecture file
+ //::std::string archname;
+ //TODO: Loadable FFI descriptions
+ //::std::vector<const char*> ffi_api_files;
+
+ // Output logfile
+ ::std::string logfile;
+ // Arguments for the program
+ ::std::vector<const char*> args;
int parse(int argc, const char* argv[]);
+ void show_help(const char* prog) const;
};
-Value MIRI_Invoke(ModuleTree& modtree, ::HIR::Path path, ::std::vector<Value> args);
-Value MIRI_Invoke_Extern(const ::std::string& link_name, const ::std::string& abi, ::std::vector<Value> args);
-Value MIRI_Invoke_Intrinsic(ModuleTree& modtree, const ::std::string& name, const ::HIR::PathParams& ty_params, ::std::vector<Value> args);
-
int main(int argc, const char* argv[])
{
ProgramOptions opts;
@@ -32,1914 +40,150 @@ int main(int argc, const char* argv[])
return 1;
}
- auto tree = ModuleTree {};
-
- tree.load_file(opts.infile);
-
- auto val_argc = Value( ::HIR::TypeRef{RawType::I32} );
- ::HIR::TypeRef argv_ty { RawType::I8 };
- argv_ty.wrappers.push_back(TypeWrapper { TypeWrapper::Ty::Pointer, 0 });
- argv_ty.wrappers.push_back(TypeWrapper { TypeWrapper::Ty::Pointer, 0 });
- auto val_argv = Value(argv_ty);
- val_argc.write_bytes(0, "\0\0\0", 4);
- val_argv.write_bytes(0, "\0\0\0\0\0\0\0", argv_ty.get_size());
+ // Configure logging
+ if( opts.logfile != "" )
+ {
+ DebugSink::set_output_file(opts.logfile);
+ }
+ // Load HIR tree
+ auto tree = ModuleTree {};
try
{
- ::std::vector<Value> args;
- args.push_back(::std::move(val_argc));
- args.push_back(::std::move(val_argv));
- auto rv = MIRI_Invoke( tree, tree.find_lang_item("start"), ::std::move(args) );
- ::std::cout << rv << ::std::endl;
+ tree.load_file(opts.infile);
}
catch(const DebugExceptionTodo& /*e*/)
{
::std::cerr << "TODO Hit" << ::std::endl;
+ if(opts.logfile != "")
+ {
+ ::std::cerr << "- See '" << opts.logfile << "' for details" << ::std::endl;
+ }
return 1;
}
catch(const DebugExceptionError& /*e*/)
{
::std::cerr << "Error encountered" << ::std::endl;
- return 1;
- }
-
- return 0;
-}
-class PrimitiveValue
-{
-public:
- virtual ~PrimitiveValue() {}
-
- virtual bool add(const PrimitiveValue& v) = 0;
- virtual bool subtract(const PrimitiveValue& v) = 0;
- virtual bool multiply(const PrimitiveValue& v) = 0;
- virtual bool divide(const PrimitiveValue& v) = 0;
- virtual bool modulo(const PrimitiveValue& v) = 0;
- virtual void write_to_value(Value& tgt, size_t ofs) const = 0;
-
- template<typename T>
- const T& check(const char* opname) const
- {
- const auto* xp = dynamic_cast<const T*>(this);
- LOG_ASSERT(xp, "Attempting to " << opname << " mismatched types, expected " << typeid(T).name() << " got " << typeid(*this).name());
- return *xp;
- }
-};
-template<typename T>
-struct PrimitiveUInt:
- public PrimitiveValue
-{
- typedef PrimitiveUInt<T> Self;
- T v;
-
- PrimitiveUInt(T v): v(v) {}
- ~PrimitiveUInt() override {}
-
- bool add(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("add");
- T newv = this->v + xp->v;
- bool did_overflow = newv < this->v;
- this->v = newv;
- return !did_overflow;
- }
- bool subtract(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("subtract");
- T newv = this->v - xp->v;
- bool did_overflow = newv > this->v;
- this->v = newv;
- return !did_overflow;
- }
- bool multiply(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("multiply");
- T newv = this->v * xp->v;
- bool did_overflow = newv < this->v && newv < xp->v;
- this->v = newv;
- return !did_overflow;
- }
- bool divide(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("divide");
- if(xp->v == 0) return false;
- T newv = this->v / xp->v;
- this->v = newv;
- return true;
- }
- bool modulo(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("modulo");
- if(xp->v == 0) return false;
- T newv = this->v % xp->v;
- this->v = newv;
- return true;
- }
-};
-struct PrimitiveU64: public PrimitiveUInt<uint64_t>
-{
- PrimitiveU64(uint64_t v): PrimitiveUInt(v) {}
- void write_to_value(Value& tgt, size_t ofs) const override {
- tgt.write_u64(ofs, this->v);
- }
-};
-struct PrimitiveU32: public PrimitiveUInt<uint32_t>
-{
- PrimitiveU32(uint32_t v): PrimitiveUInt(v) {}
- void write_to_value(Value& tgt, size_t ofs) const override {
- tgt.write_u32(ofs, this->v);
- }
-};
-template<typename T>
-struct PrimitiveSInt:
- public PrimitiveValue
-{
- typedef PrimitiveSInt<T> Self;
- T v;
-
- PrimitiveSInt(T v): v(v) {}
- ~PrimitiveSInt() override {}
-
- // TODO: Make this correct.
- bool add(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("add");
- T newv = this->v + xp->v;
- bool did_overflow = newv < this->v;
- this->v = newv;
- return !did_overflow;
- }
- bool subtract(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("subtract");
- T newv = this->v - xp->v;
- bool did_overflow = newv > this->v;
- this->v = newv;
- return !did_overflow;
- }
- bool multiply(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("multiply");
- T newv = this->v * xp->v;
- bool did_overflow = newv < this->v && newv < xp->v;
- this->v = newv;
- return !did_overflow;
- }
- bool divide(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("divide");
- if(xp->v == 0) return false;
- T newv = this->v / xp->v;
- this->v = newv;
- return true;
- }
- bool modulo(const PrimitiveValue& x) override {
- const auto* xp = &x.check<Self>("modulo");
- if(xp->v == 0) return false;
- T newv = this->v % xp->v;
- this->v = newv;
- return true;
- }
-};
-struct PrimitiveI64: public PrimitiveSInt<int64_t>
-{
- PrimitiveI64(int64_t v): PrimitiveSInt(v) {}
- void write_to_value(Value& tgt, size_t ofs) const override {
- tgt.write_i64(ofs, this->v);
- }
-};
-struct PrimitiveI32: public PrimitiveSInt<int32_t>
-{
- PrimitiveI32(int32_t v): PrimitiveSInt(v) {}
- void write_to_value(Value& tgt, size_t ofs) const override {
- tgt.write_i32(ofs, this->v);
- }
-};
-
-class PrimitiveValueVirt
-{
- uint64_t buf[3]; // Allows i128 plus a vtable pointer
- PrimitiveValueVirt() {}
-public:
- // HACK: No copy/move constructors, assumes that contained data is always POD
- ~PrimitiveValueVirt() {
- reinterpret_cast<PrimitiveValue*>(&this->buf)->~PrimitiveValue();
- }
- PrimitiveValue& get() { return *reinterpret_cast<PrimitiveValue*>(&this->buf); }
- const PrimitiveValue& get() const { return *reinterpret_cast<const PrimitiveValue*>(&this->buf); }
-
- static PrimitiveValueVirt from_value(const ::HIR::TypeRef& t, const ValueRef& v) {
- PrimitiveValueVirt rv;
- LOG_ASSERT(t.wrappers.empty(), "PrimitiveValueVirt::from_value: " << t);
- switch(t.inner_type)
- {
- case RawType::U32:
- new(&rv.buf) PrimitiveU32(v.read_u32(0));
- break;
- case RawType::U64:
- new(&rv.buf) PrimitiveU64(v.read_u64(0));
- break;
- case RawType::USize:
- if( POINTER_SIZE == 8 )
- new(&rv.buf) PrimitiveU64(v.read_u64(0));
- else
- new(&rv.buf) PrimitiveU32(v.read_u32(0));
- break;
-
- case RawType::I32:
- new(&rv.buf) PrimitiveI32(v.read_i32(0));
- break;
- case RawType::I64:
- new(&rv.buf) PrimitiveI64(v.read_i64(0));
- break;
- case RawType::ISize:
- if( POINTER_SIZE == 8 )
- new(&rv.buf) PrimitiveI64(v.read_i64(0));
- else
- new(&rv.buf) PrimitiveI32(v.read_i32(0));
- break;
- default:
- LOG_TODO("PrimitiveValueVirt::from_value: " << t);
- }
- return rv;
- }
-};
-
-struct Ops {
- template<typename T>
- static int do_compare(T l, T r) {
- if( l == r ) {
- return 0;
- }
- else if( !(l != r) ) {
- // Special return value for NaN w/ NaN
- return 2;
- }
- else if( l < r ) {
- return -1;
- }
- else {
- return 1;
- }
- }
- template<typename T>
- static T do_bitwise(T l, T r, ::MIR::eBinOp op) {
- switch(op)
- {
- case ::MIR::eBinOp::BIT_AND: return l & r;
- case ::MIR::eBinOp::BIT_OR: return l | r;
- case ::MIR::eBinOp::BIT_XOR: return l ^ r;
- case ::MIR::eBinOp::BIT_SHL: return l << r;
- case ::MIR::eBinOp::BIT_SHR: return l >> r;
- default:
- LOG_BUG("Unexpected operation in Ops::do_bitwise");
- }
- }
-};
-
-namespace
-{
-
- void drop_value(ModuleTree& modtree, Value ptr, const ::HIR::TypeRef& ty)
- {
- if( ty.wrappers.empty() )
- {
- if( ty.inner_type == RawType::Composite )
- {
- if( ty.composite_type->drop_glue != ::HIR::Path() )
- {
- LOG_DEBUG("Drop - " << ty);
-
- MIRI_Invoke(modtree, ty.composite_type->drop_glue, { ptr });
- }
- else
- {
- // No drop glue
- }
- }
- else if( ty.inner_type == RawType::TraitObject )
- {
- LOG_TODO("Drop - " << ty << " - trait object");
- }
- else
- {
- // No destructor
- }
- }
- else if( ty.wrappers[0].type == TypeWrapper::Ty::Borrow )
+ if(opts.logfile != "")
{
- if( ty.wrappers[0].size == static_cast<size_t>(::HIR::BorrowType::Move) )
- {
- LOG_TODO("Drop - " << ty << " - dereference and go to inner");
- // TODO: Clear validity on the entire inner value.
- }
- else
- {
- // No destructor
- }
- }
- // TODO: Arrays
- else
- {
- LOG_TODO("Drop - " << ty << " - array?");
+ ::std::cerr << "- See '" << opts.logfile << "' for details" << ::std::endl;
}
+ return 1;
}
-}
-
-Value MIRI_Invoke(ModuleTree& modtree, ::HIR::Path path, ::std::vector<Value> args)
-{
- Value ret;
-
- const auto& fcn = modtree.get_function(path);
-
-
- // TODO: Support overriding certain functions
- {
- if( path == ::HIR::SimplePath { "std", { "sys", "imp", "c", "SetThreadStackGuarantee" } } )
- {
- ret = Value(::HIR::TypeRef{RawType::I32});
- ret.write_i32(0, 120); // ERROR_CALL_NOT_IMPLEMENTED
- return ret;
- }
- }
-
- if( fcn.external.link_name != "" )
- {
- // External function!
- ret = MIRI_Invoke_Extern(fcn.external.link_name, fcn.external.link_abi, ::std::move(args));
- LOG_DEBUG(path << " = " << ret);
- return ret;
- }
-
- TRACE_FUNCTION_R(path, path << " = " << ret);
- for(size_t i = 0; i < args.size(); i ++)
+ // Create argc/argv based on input arguments
+ auto argv_alloc = Allocation::new_alloc((1 + opts.args.size()) * POINTER_SIZE);
+ argv_alloc->write_usize(0 * POINTER_SIZE, 0);
+ argv_alloc->relocations.push_back({ 0 * POINTER_SIZE, RelocationPtr::new_ffi(FFIPointer { "", (void*)(opts.infile.c_str()), opts.infile.size() + 1 }) });
+ for(size_t i = 0; i < opts.args.size(); i ++)
{
- LOG_DEBUG("- Argument(" << i << ") = " << args[i]);
+ argv_alloc->write_usize((1 + i) * POINTER_SIZE, 0);
+ argv_alloc->relocations.push_back({ (1 + i) * POINTER_SIZE, RelocationPtr::new_ffi({ "", (void*)(opts.args[i]), ::std::strlen(opts.args[i]) + 1 }) });
}
+
+ // Construct argc/argv values
+ auto val_argc = Value::new_isize(1 + opts.args.size());
+ auto argv_ty = ::HIR::TypeRef(RawType::I8).wrap(TypeWrapper::Ty::Pointer, 0 ).wrap(TypeWrapper::Ty::Pointer, 0);
+ auto val_argv = Value::new_pointer(argv_ty, 0, RelocationPtr::new_alloc(argv_alloc));
- ret = Value(fcn.ret_ty == RawType::Unreachable ? ::HIR::TypeRef() : fcn.ret_ty);
-
- struct State
+ // Catch various exceptions from the interpreter
+ try
{
- ModuleTree& modtree;
- const Function& fcn;
- Value& ret;
+ InterpreterThread root_thread(tree);
+
::std::vector<Value> args;
- ::std::vector<Value> locals;
- ::std::vector<bool> drop_flags;
-
- State(ModuleTree& modtree, const Function& fcn, Value& ret, ::std::vector<Value> args):
- modtree(modtree),
- fcn(fcn),
- ret(ret),
- args(::std::move(args)),
- drop_flags(fcn.m_mir.drop_flags)
- {
- locals.reserve(fcn.m_mir.locals.size());
- for(const auto& ty : fcn.m_mir.locals)
- {
- if( ty == RawType::Unreachable ) {
- // HACK: Locals can be !, but they can NEVER be accessed
- locals.push_back(Value());
- }
- else {
- locals.push_back(Value(ty));
- }
- }
- }
-
- ValueRef get_value_and_type(const ::MIR::LValue& lv, ::HIR::TypeRef& ty)
- {
- switch(lv.tag())
- {
- case ::MIR::LValue::TAGDEAD: throw "";
- TU_ARM(lv, Return, _e) {
- ty = fcn.ret_ty;
- return ValueRef(ret, 0, ret.size());
- } break;
- TU_ARM(lv, Local, e) {
- ty = fcn.m_mir.locals.at(e);
- return ValueRef(locals.at(e), 0, locals.at(e).size());
- } break;
- TU_ARM(lv, Argument, e) {
- ty = fcn.args.at(e.idx);
- return ValueRef(args.at(e.idx), 0, args.at(e.idx).size());
- } break;
- TU_ARM(lv, Static, e) {
- // TODO: Type!
- return ValueRef(modtree.get_static(e), 0, modtree.get_static(e).size());
- } break;
- TU_ARM(lv, Index, e) {
- auto idx = get_value_ref(*e.idx).read_usize(0);
- ::HIR::TypeRef array_ty;
- auto base_val = get_value_and_type(*e.val, array_ty);
- if( array_ty.wrappers.empty() )
- throw "ERROR";
- if( array_ty.wrappers.front().type == TypeWrapper::Ty::Array )
- {
- ty = array_ty.get_inner();
- base_val.m_offset += ty.get_size() * idx;
- return base_val;
- }
- else if( array_ty.wrappers.front().type == TypeWrapper::Ty::Slice )
- {
- throw "TODO";
- }
- else
- {
- throw "ERROR";
- }
- } break;
- TU_ARM(lv, Field, e) {
- ::HIR::TypeRef composite_ty;
- auto base_val = get_value_and_type(*e.val, composite_ty);
- // TODO: if there's metadata present in the base, but the inner doesn't have metadata, clear the metadata
- size_t inner_ofs;
- ty = composite_ty.get_field(e.field_index, inner_ofs);
- LOG_DEBUG("Field - " << composite_ty << "#" << e.field_index << " = @" << inner_ofs << " " << ty);
- base_val.m_offset += inner_ofs;
- if( !ty.get_meta_type() )
- {
- LOG_ASSERT(base_val.m_size >= ty.get_size(), "Field didn't fit in the value - " << ty.get_size() << " required, but " << base_val.m_size << " avail");
- base_val.m_size = ty.get_size();
- }
- return base_val;
- }
- TU_ARM(lv, Downcast, e) {
- ::HIR::TypeRef composite_ty;
- auto base_val = get_value_and_type(*e.val, composite_ty);
- LOG_DEBUG("Downcast - " << composite_ty);
-
- size_t inner_ofs;
- ty = composite_ty.get_field(e.variant_index, inner_ofs);
- base_val.m_offset += inner_ofs;
- return base_val;
- }
- TU_ARM(lv, Deref, e) {
- ::HIR::TypeRef ptr_ty;
- auto val = get_value_and_type(*e.val, ptr_ty);
- ty = ptr_ty.get_inner();
- LOG_DEBUG("val = " << val);
-
- LOG_ASSERT(val.m_size >= POINTER_SIZE, "Deref of a value that doesn't fit a pointer - " << ty);
- size_t ofs = val.read_usize(0);
-
- // There MUST be a relocation at this point with a valid allocation.
- auto& val_alloc = val.m_alloc ? val.m_alloc : val.m_value->allocation;
- LOG_ASSERT(val_alloc, "Deref of a value with no allocation (hence no relocations)");
- LOG_ASSERT(val_alloc.is_alloc(), "Deref of a value with a non-data allocation");
- LOG_TRACE("Deref " << val_alloc.alloc() << " + " << ofs << " to give value of type " << ty);
- auto alloc = val_alloc.alloc().get_relocation(val.m_offset);
- LOG_ASSERT(alloc, "Deref of a value with no relocation");
- if( alloc.is_alloc() )
- {
- LOG_DEBUG("> " << lv << " alloc=" << alloc.alloc());
- }
- size_t size;
-
- const auto* meta_ty = ty.get_meta_type();
- ::std::shared_ptr<Value> meta_val;
- // If the type has metadata, store it.
- if( meta_ty )
- {
- auto meta_size = meta_ty->get_size();
- LOG_ASSERT(val.m_size == POINTER_SIZE + meta_size, "Deref of " << ty << ", but pointer isn't correct size");
- meta_val = ::std::make_shared<Value>( val.read_value(POINTER_SIZE, meta_size) );
-
- // TODO: Get a more sane size from the metadata
- LOG_DEBUG("> Meta " << *meta_val << ", size = " << alloc.get_size() << " - " << ofs);
- size = alloc.get_size() - ofs;
- }
- else
- {
- LOG_ASSERT(val.m_size == POINTER_SIZE, "Deref of a value that isn't a pointer-sized value (size=" << val.m_size << ") - " << val << ": " << ptr_ty);
- size = ty.get_size();
- }
-
- auto rv = ValueRef(::std::move(alloc), ofs, size);
- rv.m_metadata = ::std::move(meta_val);
- return rv;
- } break;
- }
- throw "";
- }
- ValueRef get_value_ref(const ::MIR::LValue& lv)
- {
- ::HIR::TypeRef tmp;
- return get_value_and_type(lv, tmp);
- }
-
- ::HIR::TypeRef get_lvalue_ty(const ::MIR::LValue& lv)
- {
- ::HIR::TypeRef ty;
- get_value_and_type(lv, ty);
- return ty;
- }
-
- Value read_lvalue_with_ty(const ::MIR::LValue& lv, ::HIR::TypeRef& ty)
- {
- auto base_value = get_value_and_type(lv, ty);
-
- return base_value.read_value(0, ty.get_size());
- }
- Value read_lvalue(const ::MIR::LValue& lv)
- {
- ::HIR::TypeRef ty;
- return read_lvalue_with_ty(lv, ty);
- }
- void write_lvalue(const ::MIR::LValue& lv, Value val)
- {
- //LOG_DEBUG(lv << " = " << val);
- ::HIR::TypeRef ty;
- auto base_value = get_value_and_type(lv, ty);
-
- if(base_value.m_alloc) {
- base_value.m_alloc.alloc().write_value(base_value.m_offset, ::std::move(val));
- }
- else {
- base_value.m_value->write_value(base_value.m_offset, ::std::move(val));
- }
- }
-
- Value const_to_value(const ::MIR::Constant& c, ::HIR::TypeRef& ty)
- {
- switch(c.tag())
- {
- case ::MIR::Constant::TAGDEAD: throw "";
- TU_ARM(c, Int, ce) {
- ty = ::HIR::TypeRef(ce.t);
- Value val = Value(ty);
- val.write_bytes(0, &ce.v, ::std::min(ty.get_size(), sizeof(ce.v))); // TODO: Endian
- // TODO: If the write was clipped, sign-extend
- return val;
- } break;
- TU_ARM(c, Uint, ce) {
- ty = ::HIR::TypeRef(ce.t);
- Value val = Value(ty);
- val.write_bytes(0, &ce.v, ::std::min(ty.get_size(), sizeof(ce.v))); // TODO: Endian
- return val;
- } break;
- TU_ARM(c, Bool, ce) {
- Value val = Value(::HIR::TypeRef { RawType::Bool });
- val.write_bytes(0, &ce.v, 1);
- return val;
- } break;
- TU_ARM(c, Float, ce) {
- ty = ::HIR::TypeRef(ce.t);
- Value val = Value(ty);
- if( ce.t.raw_type == RawType::F64 ) {
- val.write_bytes(0, &ce.v, ::std::min(ty.get_size(), sizeof(ce.v))); // TODO: Endian/format?
- }
- else if( ce.t.raw_type == RawType::F32 ) {
- float v = static_cast<float>(ce.v);
- val.write_bytes(0, &v, ::std::min(ty.get_size(), sizeof(v))); // TODO: Endian/format?
- }
- else {
- throw ::std::runtime_error("BUG: Invalid type in Constant::Float");
- }
- return val;
- } break;
- TU_ARM(c, Const, ce) {
- LOG_BUG("Constant::Const in mmir");
- } break;
- TU_ARM(c, Bytes, ce) {
- LOG_TODO("Constant::Bytes");
- } break;
- TU_ARM(c, StaticString, ce) {
- ty = ::HIR::TypeRef(RawType::Str);
- ty.wrappers.push_back(TypeWrapper { TypeWrapper::Ty::Borrow, 0 });
- Value val = Value(ty);
- val.write_usize(0, 0);
- val.write_usize(POINTER_SIZE, ce.size());
- val.allocation.alloc().relocations.push_back(Relocation { 0, AllocationPtr::new_string(&ce) });
- LOG_DEBUG(c << " = " << val);
- //return Value::new_dataptr(ce.data());
- return val;
- } break;
- TU_ARM(c, ItemAddr, ce) {
- // Create a value with a special backing allocation of zero size that references the specified item.
- if( const auto* fn = modtree.get_function_opt(ce) ) {
- return Value::new_fnptr(ce);
- }
- LOG_TODO("Constant::ItemAddr - statics?");
- } break;
- }
- throw "";
- }
- Value const_to_value(const ::MIR::Constant& c)
- {
- ::HIR::TypeRef ty;
- return const_to_value(c, ty);
- }
- Value param_to_value(const ::MIR::Param& p, ::HIR::TypeRef& ty)
- {
- switch(p.tag())
- {
- case ::MIR::Param::TAGDEAD: throw "";
- TU_ARM(p, Constant, pe)
- return const_to_value(pe, ty);
- TU_ARM(p, LValue, pe)
- return read_lvalue_with_ty(pe, ty);
- }
- throw "";
- }
- Value param_to_value(const ::MIR::Param& p)
- {
- ::HIR::TypeRef ty;
- return param_to_value(p, ty);
- }
-
- ValueRef get_value_ref_param(const ::MIR::Param& p, Value& tmp, ::HIR::TypeRef& ty)
- {
- switch(p.tag())
- {
- case ::MIR::Param::TAGDEAD: throw "";
- TU_ARM(p, Constant, pe)
- tmp = const_to_value(pe, ty);
- return ValueRef(tmp, 0, ty.get_size());
- TU_ARM(p, LValue, pe)
- return get_value_and_type(pe, ty);
- }
- throw "";
- }
- } state { modtree, fcn, ret, ::std::move(args) };
-
- size_t bb_idx = 0;
- for(;;)
- {
- const auto& bb = fcn.m_mir.blocks.at(bb_idx);
-
- for(const auto& stmt : bb.statements)
- {
- LOG_DEBUG("BB" << bb_idx << "/" << (&stmt - bb.statements.data()) << ": " << stmt);
- switch(stmt.tag())
- {
- case ::MIR::Statement::TAGDEAD: throw "";
- TU_ARM(stmt, Assign, se) {
- Value new_val;
- switch(se.src.tag())
- {
- case ::MIR::RValue::TAGDEAD: throw "";
- TU_ARM(se.src, Use, re) {
- new_val = state.read_lvalue(re);
- } break;
- TU_ARM(se.src, Constant, re) {
- new_val = state.const_to_value(re);
- } break;
- TU_ARM(se.src, Borrow, re) {
- ::HIR::TypeRef src_ty;
- ValueRef src_base_value = state.get_value_and_type(re.val, src_ty);
- auto alloc = src_base_value.m_alloc;
- if( !alloc )
- {
- if( !src_base_value.m_value->allocation )
- {
- src_base_value.m_value->create_allocation();
- }
- alloc = AllocationPtr(src_base_value.m_value->allocation);
- }
- if( alloc.is_alloc() )
- LOG_DEBUG("- alloc=" << alloc << " (" << alloc.alloc() << ")");
- else
- LOG_DEBUG("- alloc=" << alloc);
- size_t ofs = src_base_value.m_offset;
- const auto* meta = src_ty.get_meta_type();
- bool is_slice_like = src_ty.has_slice_meta();
- src_ty.wrappers.insert(src_ty.wrappers.begin(), TypeWrapper { TypeWrapper::Ty::Borrow, static_cast<size_t>(re.type) });
-
- new_val = Value(src_ty);
- // ^ Pointer value
- new_val.write_usize(0, ofs);
- if( meta )
- {
- LOG_ASSERT(src_base_value.m_metadata, "Borrow of an unsized value, but no metadata avaliable");
- new_val.write_value(POINTER_SIZE, *src_base_value.m_metadata);
- }
- // - Add the relocation after writing the value (writing clears the relocations)
- new_val.allocation.alloc().relocations.push_back(Relocation { 0, ::std::move(alloc) });
- } break;
- TU_ARM(se.src, Cast, re) {
- // Determine the type of cast, is it a reinterpret or is it a value transform?
- // - Float <-> integer is a transform, anything else should be a reinterpret.
- ::HIR::TypeRef src_ty;
- auto src_value = state.get_value_and_type(re.val, src_ty);
-
- new_val = Value(re.type);
- if( re.type == src_ty )
- {
- // No-op cast
- new_val = src_value.read_value(0, re.type.get_size());
- }
- else if( !re.type.wrappers.empty() )
- {
- // Destination can only be a raw pointer
- if( re.type.wrappers.at(0).type != TypeWrapper::Ty::Pointer ) {
- throw "ERROR";
- }
- if( !src_ty.wrappers.empty() )
- {
- // Source can be either
- if( src_ty.wrappers.at(0).type != TypeWrapper::Ty::Pointer
- && src_ty.wrappers.at(0).type != TypeWrapper::Ty::Borrow ) {
- throw "ERROR";
- }
-
- if( src_ty.get_size() > re.type.get_size() ) {
- // TODO: How to casting fat to thin?
- //LOG_TODO("Handle casting fat to thin, " << src_ty << " -> " << re.type);
- new_val = src_value.read_value(0, re.type.get_size());
- }
- else
- {
- new_val = src_value.read_value(0, re.type.get_size());
- }
- }
- else
- {
- if( src_ty == RawType::Function )
- {
- }
- else if( src_ty == RawType::USize )
- {
- }
- else
- {
- ::std::cerr << "ERROR: Trying to pointer (" << re.type <<" ) from invalid type (" << src_ty << ")\n";
- throw "ERROR";
- }
- new_val = src_value.read_value(0, re.type.get_size());
- }
- }
- else if( !src_ty.wrappers.empty() )
- {
- // TODO: top wrapper MUST be a pointer
- if( src_ty.wrappers.at(0).type != TypeWrapper::Ty::Pointer
- && src_ty.wrappers.at(0).type != TypeWrapper::Ty::Borrow ) {
- throw "ERROR";
- }
- // TODO: MUST be a thin pointer?
-
- // TODO: MUST be an integer (usize only?)
- if( re.type != RawType::USize && re.type != RawType::ISize ) {
- LOG_ERROR("Casting from a pointer to non-usize - " << re.type << " to " << src_ty);
- throw "ERROR";
- }
- new_val = src_value.read_value(0, re.type.get_size());
- }
- else
- {
- // TODO: What happens if there'a cast of something with a relocation?
- switch(re.type.inner_type)
- {
- case RawType::Unreachable: throw "BUG";
- case RawType::Composite: throw "ERROR";
- case RawType::TraitObject: throw "ERROR";
- case RawType::Function: throw "ERROR";
- case RawType::Str: throw "ERROR";
- case RawType::Unit: throw "ERROR";
- case RawType::F32: {
- float dst_val = 0.0;
- // Can be an integer, or F64 (pointer is impossible atm)
- switch(src_ty.inner_type)
- {
- case RawType::Unreachable: throw "BUG";
- case RawType::Composite: throw "ERROR";
- case RawType::TraitObject: throw "ERROR";
- case RawType::Function: throw "ERROR";
- case RawType::Char: throw "ERROR";
- case RawType::Str: throw "ERROR";
- case RawType::Unit: throw "ERROR";
- case RawType::Bool: throw "ERROR";
- case RawType::F32: throw "BUG";
- case RawType::F64: dst_val = static_cast<float>( src_value.read_f64(0) ); break;
- case RawType::USize: throw "TODO";// /*dst_val = src_value.read_usize();*/ break;
- case RawType::ISize: throw "TODO";// /*dst_val = src_value.read_isize();*/ break;
- case RawType::U8: dst_val = static_cast<float>( src_value.read_u8 (0) ); break;
- case RawType::I8: dst_val = static_cast<float>( src_value.read_i8 (0) ); break;
- case RawType::U16: dst_val = static_cast<float>( src_value.read_u16(0) ); break;
- case RawType::I16: dst_val = static_cast<float>( src_value.read_i16(0) ); break;
- case RawType::U32: dst_val = static_cast<float>( src_value.read_u32(0) ); break;
- case RawType::I32: dst_val = static_cast<float>( src_value.read_i32(0) ); break;
- case RawType::U64: dst_val = static_cast<float>( src_value.read_u64(0) ); break;
- case RawType::I64: dst_val = static_cast<float>( src_value.read_i64(0) ); break;
- case RawType::U128: throw "TODO";// /*dst_val = src_value.read_u128();*/ break;
- case RawType::I128: throw "TODO";// /*dst_val = src_value.read_i128();*/ break;
- }
- new_val.write_f32(0, dst_val);
- } break;
- case RawType::F64: {
- double dst_val = 0.0;
- // Can be an integer, or F32 (pointer is impossible atm)
- switch(src_ty.inner_type)
- {
- case RawType::Unreachable: throw "BUG";
- case RawType::Composite: throw "ERROR";
- case RawType::TraitObject: throw "ERROR";
- case RawType::Function: throw "ERROR";
- case RawType::Char: throw "ERROR";
- case RawType::Str: throw "ERROR";
- case RawType::Unit: throw "ERROR";
- case RawType::Bool: throw "ERROR";
- case RawType::F64: throw "BUG";
- case RawType::F32: dst_val = static_cast<double>( src_value.read_f32(0) ); break;
- case RawType::USize: dst_val = static_cast<double>( src_value.read_usize(0) ); break;
- case RawType::ISize: dst_val = static_cast<double>( src_value.read_isize(0) ); break;
- case RawType::U8: dst_val = static_cast<double>( src_value.read_u8 (0) ); break;
- case RawType::I8: dst_val = static_cast<double>( src_value.read_i8 (0) ); break;
- case RawType::U16: dst_val = static_cast<double>( src_value.read_u16(0) ); break;
- case RawType::I16: dst_val = static_cast<double>( src_value.read_i16(0) ); break;
- case RawType::U32: dst_val = static_cast<double>( src_value.read_u32(0) ); break;
- case RawType::I32: dst_val = static_cast<double>( src_value.read_i32(0) ); break;
- case RawType::U64: dst_val = static_cast<double>( src_value.read_u64(0) ); break;
- case RawType::I64: dst_val = static_cast<double>( src_value.read_i64(0) ); break;
- case RawType::U128: throw "TODO"; /*dst_val = src_value.read_u128();*/ break;
- case RawType::I128: throw "TODO"; /*dst_val = src_value.read_i128();*/ break;
- }
- new_val.write_f64(0, dst_val);
- } break;
- case RawType::Bool:
- LOG_TODO("Cast to " << re.type);
- case RawType::Char:
- LOG_TODO("Cast to " << re.type);
- case RawType::USize:
- case RawType::U8:
- case RawType::U16:
- case RawType::U32:
- case RawType::U64:
- case RawType::ISize:
- case RawType::I8:
- case RawType::I16:
- case RawType::I32:
- case RawType::I64:
- {
- uint64_t dst_val = 0;
- // Can be an integer, or F32 (pointer is impossible atm)
- switch(src_ty.inner_type)
- {
- case RawType::Unreachable:
- LOG_BUG("Casting unreachable");
- case RawType::TraitObject:
- case RawType::Str:
- LOG_FATAL("Cast of unsized type - " << src_ty);
- case RawType::Function:
-                    LOG_ASSERT(re.type.inner_type == RawType::USize, "Function pointers can only be cast to usize, instead " << re.type);
- new_val = src_value.read_value(0, re.type.get_size());
- break;
- case RawType::Char:
-                    LOG_ASSERT(re.type.inner_type == RawType::U32, "Char can only be cast to u32, instead " << re.type);
- new_val = src_value.read_value(0, 4);
- break;
- case RawType::Unit:
- LOG_FATAL("Cast of unit");
- case RawType::Composite: {
- const auto& dt = *src_ty.composite_type;
- if( dt.variants.size() == 0 ) {
- LOG_FATAL("Cast of composite - " << src_ty);
- }
- // TODO: Check that all variants have the same tag offset
- LOG_ASSERT(dt.fields.size() == 1, "");
- LOG_ASSERT(dt.fields[0].first == 0, "");
- for(size_t i = 0; i < dt.variants.size(); i ++ ) {
- LOG_ASSERT(dt.variants[i].base_field == 0, "");
- LOG_ASSERT(dt.variants[i].field_path.empty(), "");
- }
- ::HIR::TypeRef tag_ty = dt.fields[0].second;
- LOG_ASSERT(tag_ty.wrappers.empty(), "");
- switch(tag_ty.inner_type)
- {
- case RawType::USize:
- dst_val = static_cast<uint64_t>( src_value.read_usize(0) );
- if(0)
- case RawType::ISize:
- dst_val = static_cast<uint64_t>( src_value.read_isize(0) );
- if(0)
- case RawType::U8:
- dst_val = static_cast<uint64_t>( src_value.read_u8 (0) );
- if(0)
- case RawType::I8:
- dst_val = static_cast<uint64_t>( src_value.read_i8 (0) );
- if(0)
- case RawType::U16:
- dst_val = static_cast<uint64_t>( src_value.read_u16(0) );
- if(0)
- case RawType::I16:
- dst_val = static_cast<uint64_t>( src_value.read_i16(0) );
- if(0)
- case RawType::U32:
- dst_val = static_cast<uint64_t>( src_value.read_u32(0) );
- if(0)
- case RawType::I32:
- dst_val = static_cast<uint64_t>( src_value.read_i32(0) );
- if(0)
- case RawType::U64:
- dst_val = static_cast<uint64_t>( src_value.read_u64(0) );
- if(0)
- case RawType::I64:
- dst_val = static_cast<uint64_t>( src_value.read_i64(0) );
- break;
- default:
- LOG_FATAL("Bad tag type in cast - " << tag_ty);
- }
- } if(0)
- case RawType::Bool:
- dst_val = static_cast<uint64_t>( src_value.read_u8 (0) );
- if(0)
- case RawType::F64:
- dst_val = static_cast<uint64_t>( src_value.read_f64(0) );
- if(0)
- case RawType::F32:
- dst_val = static_cast<uint64_t>( src_value.read_f32(0) );
- if(0)
- case RawType::USize:
- dst_val = static_cast<uint64_t>( src_value.read_usize(0) );
- if(0)
- case RawType::ISize:
- dst_val = static_cast<uint64_t>( src_value.read_isize(0) );
- if(0)
- case RawType::U8:
- dst_val = static_cast<uint64_t>( src_value.read_u8 (0) );
- if(0)
- case RawType::I8:
- dst_val = static_cast<uint64_t>( src_value.read_i8 (0) );
- if(0)
- case RawType::U16:
- dst_val = static_cast<uint64_t>( src_value.read_u16(0) );
- if(0)
- case RawType::I16:
- dst_val = static_cast<uint64_t>( src_value.read_i16(0) );
- if(0)
- case RawType::U32:
- dst_val = static_cast<uint64_t>( src_value.read_u32(0) );
- if(0)
- case RawType::I32:
- dst_val = static_cast<uint64_t>( src_value.read_i32(0) );
- if(0)
- case RawType::U64:
- dst_val = static_cast<uint64_t>( src_value.read_u64(0) );
- if(0)
- case RawType::I64:
- dst_val = static_cast<uint64_t>( src_value.read_i64(0) );
-
- switch(re.type.inner_type)
- {
- case RawType::USize:
- new_val.write_usize(0, dst_val);
- break;
- case RawType::U8:
- new_val.write_u8(0, static_cast<uint8_t>(dst_val));
- break;
- case RawType::U16:
- new_val.write_u16(0, static_cast<uint16_t>(dst_val));
- break;
- case RawType::U32:
- new_val.write_u32(0, static_cast<uint32_t>(dst_val));
- break;
- case RawType::U64:
- new_val.write_u64(0, dst_val);
- break;
- case RawType::ISize:
- new_val.write_usize(0, static_cast<int64_t>(dst_val));
- break;
- case RawType::I8:
- new_val.write_i8(0, static_cast<int8_t>(dst_val));
- break;
- case RawType::I16:
- new_val.write_i16(0, static_cast<int16_t>(dst_val));
- break;
- case RawType::I32:
- new_val.write_i32(0, static_cast<int32_t>(dst_val));
- break;
- case RawType::I64:
- new_val.write_i64(0, static_cast<int64_t>(dst_val));
- break;
- default:
- throw "";
- }
- break;
- case RawType::U128: throw "TODO"; /*dst_val = src_value.read_u128();*/ break;
- case RawType::I128: throw "TODO"; /*dst_val = src_value.read_i128();*/ break;
- }
- } break;
- case RawType::U128:
- case RawType::I128:
- LOG_TODO("Cast to " << re.type);
- }
- }
- } break;
- TU_ARM(se.src, BinOp, re) {
- ::HIR::TypeRef ty_l, ty_r;
- Value tmp_l, tmp_r;
- auto v_l = state.get_value_ref_param(re.val_l, tmp_l, ty_l);
- auto v_r = state.get_value_ref_param(re.val_r, tmp_r, ty_r);
- LOG_DEBUG(v_l << " (" << ty_l <<") ? " << v_r << " (" << ty_r <<")");
-
- switch(re.op)
- {
- case ::MIR::eBinOp::EQ:
- case ::MIR::eBinOp::NE:
- case ::MIR::eBinOp::GT:
- case ::MIR::eBinOp::GE:
- case ::MIR::eBinOp::LT:
- case ::MIR::eBinOp::LE: {
- LOG_ASSERT(ty_l == ty_r, "BinOp type mismatch - " << ty_l << " != " << ty_r);
- int res = 0;
- // TODO: Handle comparison of the relocations too
-
- const auto& alloc_l = v_l.m_value ? v_l.m_value->allocation : v_l.m_alloc;
- const auto& alloc_r = v_r.m_value ? v_r.m_value->allocation : v_r.m_alloc;
- auto reloc_l = alloc_l ? v_l.get_relocation(v_l.m_offset) : AllocationPtr();
- auto reloc_r = alloc_r ? v_r.get_relocation(v_r.m_offset) : AllocationPtr();
-
- if( reloc_l != reloc_r )
- {
- res = (reloc_l < reloc_r ? -1 : 1);
- }
- LOG_DEBUG("res=" << res << ", " << reloc_l << " ? " << reloc_r);
-
- if( ty_l.wrappers.empty() )
- {
- switch(ty_l.inner_type)
- {
- case RawType::U64: res = res != 0 ? res : Ops::do_compare(v_l.read_u64(0), v_r.read_u64(0)); break;
- case RawType::U32: res = res != 0 ? res : Ops::do_compare(v_l.read_u32(0), v_r.read_u32(0)); break;
- case RawType::U16: res = res != 0 ? res : Ops::do_compare(v_l.read_u16(0), v_r.read_u16(0)); break;
- case RawType::U8 : res = res != 0 ? res : Ops::do_compare(v_l.read_u8 (0), v_r.read_u8 (0)); break;
- case RawType::I64: res = res != 0 ? res : Ops::do_compare(v_l.read_i64(0), v_r.read_i64(0)); break;
- case RawType::I32: res = res != 0 ? res : Ops::do_compare(v_l.read_i32(0), v_r.read_i32(0)); break;
- case RawType::I16: res = res != 0 ? res : Ops::do_compare(v_l.read_i16(0), v_r.read_i16(0)); break;
- case RawType::I8 : res = res != 0 ? res : Ops::do_compare(v_l.read_i8 (0), v_r.read_i8 (0)); break;
- case RawType::USize: res = res != 0 ? res : Ops::do_compare(v_l.read_usize(0), v_r.read_usize(0)); break;
- case RawType::ISize: res = res != 0 ? res : Ops::do_compare(v_l.read_isize(0), v_r.read_isize(0)); break;
- default:
- LOG_TODO("BinOp comparisons - " << se.src << " w/ " << ty_l);
- }
- }
- else if( ty_l.wrappers.front().type == TypeWrapper::Ty::Pointer )
- {
- // TODO: Technically only EQ/NE are valid.
-
- res = res != 0 ? res : Ops::do_compare(v_l.read_usize(0), v_r.read_usize(0));
-
- // Compare fat metadata.
- if( res == 0 && v_l.m_size > POINTER_SIZE )
- {
- reloc_l = alloc_l ? alloc_l.alloc().get_relocation(POINTER_SIZE) : AllocationPtr();
- reloc_r = alloc_r ? alloc_r.alloc().get_relocation(POINTER_SIZE) : AllocationPtr();
-
- if( res == 0 && reloc_l != reloc_r )
- {
- res = (reloc_l < reloc_r ? -1 : 1);
- }
- res = res != 0 ? res : Ops::do_compare(v_l.read_usize(POINTER_SIZE), v_r.read_usize(POINTER_SIZE));
- }
- }
- else
- {
- LOG_TODO("BinOp comparisons - " << se.src << " w/ " << ty_l);
- }
- bool res_bool;
- switch(re.op)
- {
- case ::MIR::eBinOp::EQ: res_bool = (res == 0); break;
- case ::MIR::eBinOp::NE: res_bool = (res != 0); break;
- case ::MIR::eBinOp::GT: res_bool = (res == 1); break;
- case ::MIR::eBinOp::GE: res_bool = (res == 1 || res == 0); break;
- case ::MIR::eBinOp::LT: res_bool = (res == -1); break;
- case ::MIR::eBinOp::LE: res_bool = (res == -1 || res == 0); break;
- break;
- default:
- LOG_BUG("Unknown comparison");
- }
- new_val = Value(::HIR::TypeRef(RawType::Bool));
- new_val.write_u8(0, res_bool ? 1 : 0);
- } break;
- case ::MIR::eBinOp::BIT_SHL:
- case ::MIR::eBinOp::BIT_SHR: {
- LOG_ASSERT(ty_l.wrappers.empty(), "Bitwise operator on non-primitive - " << ty_l);
- LOG_ASSERT(ty_r.wrappers.empty(), "Bitwise operator with non-primitive - " << ty_r);
- size_t max_bits = ty_r.get_size() * 8;
- uint8_t shift;
- auto check_cast = [&](auto v){ LOG_ASSERT(0 <= v && v <= max_bits, "Shift out of range - " << v); return static_cast<uint8_t>(v); };
- switch(ty_r.inner_type)
- {
- case RawType::U64: shift = check_cast(v_r.read_u64(0)); break;
- case RawType::U32: shift = check_cast(v_r.read_u32(0)); break;
- case RawType::U16: shift = check_cast(v_r.read_u16(0)); break;
- case RawType::U8 : shift = check_cast(v_r.read_u8 (0)); break;
- case RawType::I64: shift = check_cast(v_r.read_i64(0)); break;
- case RawType::I32: shift = check_cast(v_r.read_i32(0)); break;
- case RawType::I16: shift = check_cast(v_r.read_i16(0)); break;
- case RawType::I8 : shift = check_cast(v_r.read_i8 (0)); break;
- case RawType::USize: shift = check_cast(v_r.read_usize(0)); break;
- case RawType::ISize: shift = check_cast(v_r.read_isize(0)); break;
- default:
- LOG_TODO("BinOp shift rhs unknown type - " << se.src << " w/ " << ty_r);
- }
- new_val = Value(ty_l);
- switch(ty_l.inner_type)
- {
- case RawType::U64: new_val.write_u64(0, Ops::do_bitwise(v_l.read_u64(0), static_cast<uint64_t>(shift), re.op)); break;
- case RawType::U32: new_val.write_u32(0, Ops::do_bitwise(v_l.read_u32(0), static_cast<uint32_t>(shift), re.op)); break;
- case RawType::U16: new_val.write_u16(0, Ops::do_bitwise(v_l.read_u16(0), static_cast<uint16_t>(shift), re.op)); break;
- case RawType::U8 : new_val.write_u8 (0, Ops::do_bitwise(v_l.read_u8 (0), static_cast<uint8_t >(shift), re.op)); break;
- case RawType::USize: new_val.write_usize(0, Ops::do_bitwise(v_l.read_usize(0), static_cast<uint64_t>(shift), re.op)); break;
- default:
- LOG_TODO("BinOp shift rhs unknown type - " << se.src << " w/ " << ty_r);
- }
- } break;
- case ::MIR::eBinOp::BIT_AND:
- case ::MIR::eBinOp::BIT_OR:
- case ::MIR::eBinOp::BIT_XOR:
- LOG_ASSERT(ty_l == ty_r, "BinOp type mismatch - " << ty_l << " != " << ty_r);
- LOG_ASSERT(ty_l.wrappers.empty(), "Bitwise operator on non-primitive - " << ty_l);
- new_val = Value(ty_l);
- switch(ty_l.inner_type)
- {
- case RawType::U64:
- new_val.write_u64( 0, Ops::do_bitwise(v_l.read_u64(0), v_r.read_u64(0), re.op) );
- break;
- case RawType::U32:
- new_val.write_u32( 0, static_cast<uint32_t>(Ops::do_bitwise(v_l.read_u32(0), v_r.read_u32(0), re.op)) );
- break;
- case RawType::U16:
- new_val.write_u16( 0, static_cast<uint16_t>(Ops::do_bitwise(v_l.read_u16(0), v_r.read_u16(0), re.op)) );
- break;
- case RawType::U8:
- new_val.write_u8 ( 0, static_cast<uint8_t >(Ops::do_bitwise(v_l.read_u8 (0), v_r.read_u8 (0), re.op)) );
- break;
- case RawType::USize:
- new_val.write_usize( 0, Ops::do_bitwise(v_l.read_usize(0), v_r.read_usize(0), re.op) );
- break;
- default:
- LOG_TODO("BinOp bitwise - " << se.src << " w/ " << ty_l);
- }
-
- break;
- default:
- LOG_ASSERT(ty_l == ty_r, "BinOp type mismatch - " << ty_l << " != " << ty_r);
- auto val_l = PrimitiveValueVirt::from_value(ty_l, v_l);
- auto val_r = PrimitiveValueVirt::from_value(ty_r, v_r);
- switch(re.op)
- {
- case ::MIR::eBinOp::ADD: val_l.get().add( val_r.get() ); break;
- case ::MIR::eBinOp::SUB: val_l.get().subtract( val_r.get() ); break;
- case ::MIR::eBinOp::MUL: val_l.get().multiply( val_r.get() ); break;
- case ::MIR::eBinOp::DIV: val_l.get().divide( val_r.get() ); break;
- case ::MIR::eBinOp::MOD: val_l.get().modulo( val_r.get() ); break;
-
- default:
- LOG_TODO("Unsupported binary operator?");
- }
- new_val = Value(ty_l);
- val_l.get().write_to_value(new_val, 0);
- break;
- }
- } break;
- TU_ARM(se.src, UniOp, re) {
- ::HIR::TypeRef ty;
- auto v = state.get_value_and_type(re.val, ty);
- LOG_ASSERT(ty.wrappers.empty(), "UniOp on wrapped type - " << ty);
- new_val = Value(ty);
- switch(re.op)
- {
- case ::MIR::eUniOp::INV:
- switch(ty.inner_type)
- {
- case RawType::U128:
- LOG_TODO("UniOp::INV U128");
- case RawType::U64:
- new_val.write_u64( 0, ~v.read_u64(0) );
- break;
- case RawType::U32:
- new_val.write_u32( 0, ~v.read_u32(0) );
- break;
- case RawType::U16:
- new_val.write_u16( 0, ~v.read_u16(0) );
- break;
- case RawType::U8:
- new_val.write_u8 ( 0, ~v.read_u8 (0) );
- break;
- case RawType::USize:
- new_val.write_usize( 0, ~v.read_usize(0) );
- break;
- case RawType::Bool:
- new_val.write_u8 ( 0, v.read_u8 (0) == 0 );
- break;
- default:
- LOG_TODO("UniOp::INV - w/ type " << ty);
- }
- break;
- case ::MIR::eUniOp::NEG:
- switch(ty.inner_type)
- {
- case RawType::I128:
- LOG_TODO("UniOp::NEG I128");
- case RawType::I64:
- new_val.write_i64( 0, -v.read_i64(0) );
- break;
- case RawType::I32:
- new_val.write_i32( 0, -v.read_i32(0) );
- break;
- case RawType::I16:
- new_val.write_i16( 0, -v.read_i16(0) );
- break;
- case RawType::I8:
- new_val.write_i8 ( 0, -v.read_i8 (0) );
- break;
- case RawType::ISize:
- new_val.write_isize( 0, -v.read_isize(0) );
- break;
- default:
-                    LOG_TODO("UniOp::NEG - w/ type " << ty);
- }
- break;
- }
- } break;
- TU_ARM(se.src, DstMeta, re) {
- LOG_TODO(stmt);
- } break;
- TU_ARM(se.src, DstPtr, re) {
- LOG_TODO(stmt);
- } break;
- TU_ARM(se.src, MakeDst, re) {
- // - Get target type, just for some assertions
- ::HIR::TypeRef dst_ty;
- state.get_value_and_type(se.dst, dst_ty);
- new_val = Value(dst_ty);
-
- auto ptr = state.param_to_value(re.ptr_val );
- auto meta = state.param_to_value(re.meta_val);
- LOG_DEBUG("ty=" << dst_ty << ", ptr=" << ptr << ", meta=" << meta);
-
- new_val.write_value(0, ::std::move(ptr));
- new_val.write_value(POINTER_SIZE, ::std::move(meta));
- } break;
- TU_ARM(se.src, Tuple, re) {
- ::HIR::TypeRef dst_ty;
- state.get_value_and_type(se.dst, dst_ty);
- new_val = Value(dst_ty);
-
- for(size_t i = 0; i < re.vals.size(); i++)
- {
- auto fld_ofs = dst_ty.composite_type->fields.at(i).first;
- new_val.write_value(fld_ofs, state.param_to_value(re.vals[i]));
- }
- } break;
- TU_ARM(se.src, Array, re) {
- ::HIR::TypeRef dst_ty;
- state.get_value_and_type(se.dst, dst_ty);
- new_val = Value(dst_ty);
- // TODO: Assert that type is an array
- auto inner_ty = dst_ty.get_inner();
- size_t stride = inner_ty.get_size();
-
- size_t ofs = 0;
- for(const auto& v : re.vals)
- {
- new_val.write_value(ofs, state.param_to_value(v));
- ofs += stride;
- }
- } break;
- TU_ARM(se.src, SizedArray, re) {
- ::HIR::TypeRef dst_ty;
- state.get_value_and_type(se.dst, dst_ty);
- new_val = Value(dst_ty);
- // TODO: Assert that type is an array
- auto inner_ty = dst_ty.get_inner();
- size_t stride = inner_ty.get_size();
-
- size_t ofs = 0;
- for(size_t i = 0; i < re.count; i++)
- {
- new_val.write_value(ofs, state.param_to_value(re.val));
- ofs += stride;
- }
- } break;
- TU_ARM(se.src, Variant, re) {
- // 1. Get the composite by path.
- const auto& data_ty = state.modtree.get_composite(re.path);
- auto dst_ty = ::HIR::TypeRef(&data_ty);
- new_val = Value(dst_ty);
- LOG_DEBUG("Variant " << new_val);
- // Three cases:
- // - Unions (no tag)
- // - Data enums (tag and data)
- // - Value enums (no data)
- const auto& var = data_ty.variants.at(re.index);
- if( var.data_field != SIZE_MAX )
- {
- const auto& fld = data_ty.fields.at(re.index);
-
- new_val.write_value(fld.first, state.param_to_value(re.val));
- }
- LOG_DEBUG("Variant " << new_val);
- if( var.base_field != SIZE_MAX )
- {
- ::HIR::TypeRef tag_ty;
- size_t tag_ofs = dst_ty.get_field_ofs(var.base_field, var.field_path, tag_ty);
- LOG_ASSERT(tag_ty.get_size() == var.tag_data.size(), "");
- new_val.write_bytes(tag_ofs, var.tag_data.data(), var.tag_data.size());
- }
- else
- {
- // Union, no tag
- }
- LOG_DEBUG("Variant " << new_val);
- } break;
- TU_ARM(se.src, Struct, re) {
- const auto& data_ty = state.modtree.get_composite(re.path);
-
- ::HIR::TypeRef dst_ty;
- state.get_value_and_type(se.dst, dst_ty);
- new_val = Value(dst_ty);
- LOG_ASSERT(dst_ty.composite_type == &data_ty, "Destination type of RValue::Struct isn't the same as the input");
-
- for(size_t i = 0; i < re.vals.size(); i++)
- {
- auto fld_ofs = data_ty.fields.at(i).first;
- new_val.write_value(fld_ofs, state.param_to_value(re.vals[i]));
- }
- } break;
- }
- LOG_DEBUG("- " << new_val);
- state.write_lvalue(se.dst, ::std::move(new_val));
- } break;
- case ::MIR::Statement::TAG_Asm:
- LOG_TODO(stmt);
- break;
- TU_ARM(stmt, Drop, se) {
- if( se.flag_idx == ~0u || state.drop_flags.at(se.flag_idx) )
- {
- ::HIR::TypeRef ty;
- auto v = state.get_value_and_type(se.slot, ty);
-
- // - Take a pointer to the inner
- auto alloc = v.m_alloc;
- if( !alloc )
- {
- if( !v.m_value->allocation )
- {
- v.m_value->create_allocation();
- }
- alloc = AllocationPtr(v.m_value->allocation);
- }
- size_t ofs = v.m_offset;
- assert(!ty.get_meta_type());
-
- auto ptr_ty = ty.wrap(TypeWrapper::Ty::Borrow, 2);
-
- auto ptr_val = Value(ptr_ty);
- ptr_val.write_usize(0, ofs);
- ptr_val.allocation.alloc().relocations.push_back(Relocation { 0, ::std::move(alloc) });
-
- drop_value(modtree, ptr_val, ty);
- // TODO: Clear validity on the entire inner value.
- //alloc.mark_as_freed();
- }
- } break;
- TU_ARM(stmt, SetDropFlag, se) {
- bool val = (se.other == ~0 ? false : state.drop_flags.at(se.other)) != se.new_val;
- LOG_DEBUG("- " << val);
- state.drop_flags.at(se.idx) = val;
- } break;
- case ::MIR::Statement::TAG_ScopeEnd:
- LOG_TODO(stmt);
- break;
- }
- }
-
- LOG_DEBUG("BB" << bb_idx << "/TERM: " << bb.terminator);
- switch(bb.terminator.tag())
- {
- case ::MIR::Terminator::TAGDEAD: throw "";
- TU_ARM(bb.terminator, Incomplete, _te)
- LOG_TODO("Terminator::Incomplete hit");
- TU_ARM(bb.terminator, Diverge, _te)
- LOG_TODO("Terminator::Diverge hit");
- TU_ARM(bb.terminator, Panic, _te)
- LOG_TODO("Terminator::Panic");
- TU_ARM(bb.terminator, Goto, te)
- bb_idx = te;
- continue;
- TU_ARM(bb.terminator, Return, _te)
- LOG_DEBUG("RETURN " << state.ret);
- return state.ret;
- TU_ARM(bb.terminator, If, te) {
- uint8_t v = state.get_value_ref(te.cond).read_u8(0);
- LOG_ASSERT(v == 0 || v == 1, "");
- bb_idx = v ? te.bb0 : te.bb1;
- } continue;
- TU_ARM(bb.terminator, Switch, te) {
- ::HIR::TypeRef ty;
- auto v = state.get_value_and_type(te.val, ty);
- LOG_ASSERT(ty.wrappers.size() == 0, "" << ty);
- LOG_ASSERT(ty.inner_type == RawType::Composite, "" << ty);
-
- // TODO: Convert the variant list into something that makes it easier to switch on.
- size_t found_target = SIZE_MAX;
- size_t default_target = SIZE_MAX;
- for(size_t i = 0; i < ty.composite_type->variants.size(); i ++)
- {
- const auto& var = ty.composite_type->variants[i];
- if( var.tag_data.size() == 0 )
- {
- // Save as the default, error for multiple defaults
- if( default_target != SIZE_MAX )
- {
- LOG_FATAL("Two variants with no tag in Switch");
- }
- default_target = i;
- }
- else
- {
- // Get offset, read the value.
- ::HIR::TypeRef tag_ty;
- size_t tag_ofs = ty.get_field_ofs(var.base_field, var.field_path, tag_ty);
- // Read the value bytes
- ::std::vector<char> tmp( var.tag_data.size() );
- v.read_bytes(tag_ofs, const_cast<char*>(tmp.data()), tmp.size());
- if( v.get_relocation(tag_ofs) )
- continue ;
- if( ::std::memcmp(tmp.data(), var.tag_data.data(), tmp.size()) == 0 )
- {
- found_target = i;
- break ;
- }
- }
- }
-
- if( found_target == SIZE_MAX )
- {
- found_target = default_target;
- }
- if( found_target == SIZE_MAX )
- {
- LOG_FATAL("Terminator::Switch on " << ty << " didn't find a variant");
- }
- bb_idx = te.targets.at(found_target);
- } continue;
- TU_ARM(bb.terminator, SwitchValue, _te)
- LOG_TODO("Terminator::SwitchValue");
- TU_ARM(bb.terminator, Call, te) {
- ::std::vector<Value> sub_args; sub_args.reserve(te.args.size());
- for(const auto& a : te.args)
- {
- sub_args.push_back( state.param_to_value(a) );
- }
- if( te.fcn.is_Intrinsic() )
- {
- const auto& fe = te.fcn.as_Intrinsic();
- state.write_lvalue(te.ret_val, MIRI_Invoke_Intrinsic(modtree, fe.name, fe.params, ::std::move(sub_args)));
- }
- else
- {
- const ::HIR::Path* fcn_p;
- if( te.fcn.is_Path() ) {
- fcn_p = &te.fcn.as_Path();
- }
- else {
- ::HIR::TypeRef ty;
- auto v = state.get_value_and_type(te.fcn.as_Value(), ty);
- // TODO: Assert type
- // TODO: Assert offset/content.
- assert(v.read_usize(v.m_offset) == 0);
- auto& alloc_ptr = v.m_alloc ? v.m_alloc : v.m_value->allocation;
- LOG_ASSERT(alloc_ptr, "Calling value that can't be a pointer (no allocation)");
- auto& fcn_alloc_ptr = alloc_ptr.alloc().get_relocation(v.m_offset);
- LOG_ASSERT(fcn_alloc_ptr, "Calling value with no relocation");
- LOG_ASSERT(fcn_alloc_ptr.get_ty() == AllocationPtr::Ty::Function, "Calling value that isn't a function pointer");
- fcn_p = &fcn_alloc_ptr.fcn();
- }
-
- LOG_DEBUG("Call " << *fcn_p);
- auto v = MIRI_Invoke(modtree, *fcn_p, ::std::move(sub_args));
- LOG_DEBUG(te.ret_val << " = " << v << " (resume " << path << ")");
- state.write_lvalue(te.ret_val, ::std::move(v));
- }
- bb_idx = te.ret_block;
- } continue;
- }
- throw "";
- }
-
- throw "";
-}
-Value MIRI_Invoke_Extern(const ::std::string& link_name, const ::std::string& abi, ::std::vector<Value> args)
-{
- if( link_name == "__rust_allocate" )
- {
- auto size = args.at(0).read_usize(0);
- auto align = args.at(1).read_usize(0);
- LOG_DEBUG("__rust_allocate(size=" << size << ", align=" << align << ")");
- ::HIR::TypeRef rty { RawType::Unit };
- rty.wrappers.push_back({ TypeWrapper::Ty::Pointer, 0 });
- Value rv = Value(rty);
- rv.write_usize(0, 0);
- // TODO: Use the alignment when making an allocation?
- rv.allocation.alloc().relocations.push_back({ 0, Allocation::new_alloc(size) });
- return rv;
- }
- else if( link_name == "__rust_reallocate" )
- {
- LOG_ASSERT(args.at(0).allocation, "__rust_reallocate first argument doesn't have an allocation");
- auto alloc_ptr = args.at(0).allocation.alloc().get_relocation(0);
- auto ptr_ofs = args.at(0).read_usize(0);
- LOG_ASSERT(ptr_ofs == 0, "__rust_reallocate with offset pointer");
- auto oldsize = args.at(1).read_usize(0);
- auto newsize = args.at(2).read_usize(0);
- auto align = args.at(3).read_usize(0);
- LOG_DEBUG("__rust_reallocate(ptr=" << alloc_ptr << ", oldsize=" << oldsize << ", newsize=" << newsize << ", align=" << align << ")");
-
- LOG_ASSERT(alloc_ptr, "__rust_reallocate with no backing allocation attached to pointer");
- LOG_ASSERT(alloc_ptr.is_alloc(), "__rust_reallocate with no backing allocation attached to pointer");
- auto& alloc = alloc_ptr.alloc();
- // TODO: Check old size and alignment against allocation.
- alloc.data.resize( (newsize + 8-1) / 8 );
- alloc.mask.resize( (newsize + 8-1) / 8 );
- // TODO: Should this instead make a new allocation to catch use-after-free?
- return ::std::move(args.at(0));
- }
- else if( link_name == "__rust_deallocate" )
- {
- LOG_ASSERT(args.at(0).allocation, "__rust_deallocate first argument doesn't have an allocation");
- auto alloc_ptr = args.at(0).allocation.alloc().get_relocation(0);
- auto ptr_ofs = args.at(0).read_usize(0);
- LOG_ASSERT(ptr_ofs == 0, "__rust_deallocate with offset pointer");
-
- LOG_ASSERT(alloc_ptr, "__rust_deallocate with no backing allocation attached to pointer");
- LOG_ASSERT(alloc_ptr.is_alloc(), "__rust_deallocate with no backing allocation attached to pointer");
- auto& alloc = alloc_ptr.alloc();
- // TODO: Figure out how to prevent this ever being written again.
- //alloc.mark_as_freed();
- for(auto& v : alloc.mask)
- v = 0;
- // Just let it drop.
- return Value();
- }
-#ifdef _WIN32
- // WinAPI functions used by libstd
- else if( link_name == "AddVectoredExceptionHandler" )
- {
- LOG_DEBUG("Call `AddVectoredExceptionHandler` - Ignoring and returning non-null");
- auto rv = Value(::HIR::TypeRef(RawType::USize));
- rv.write_usize(0, 1);
- return rv;
- }
- else if( link_name == "GetModuleHandleW" )
- {
- LOG_ASSERT(args.at(0).allocation.is_alloc(), "");
- const auto& tgt_alloc = args.at(0).allocation.alloc().get_relocation(0);
- const void* arg0 = (tgt_alloc ? tgt_alloc.alloc().data_ptr() : nullptr);
- //extern void* GetModuleHandleW(const void* s);
- if(arg0) {
- LOG_DEBUG("GetModuleHandleW(" << tgt_alloc.alloc() << ")");
- }
- else {
- LOG_DEBUG("GetModuleHandleW(NULL)");
- }
-
- auto rv = GetModuleHandleW(static_cast<LPCWSTR>(arg0));
- if(rv)
- {
- return Value::new_ffiptr(FFIPointer { "GetModuleHandleW", rv });
- }
- else
- {
- auto rv = Value(::HIR::TypeRef(RawType::USize));
- rv.create_allocation();
- rv.write_usize(0,0);
- return rv;
- }
- }
- else if( link_name == "GetProcAddress" )
- {
- LOG_ASSERT(args.at(0).allocation.is_alloc(), "");
- const auto& handle_alloc = args.at(0).allocation.alloc().get_relocation(0);
- LOG_ASSERT(args.at(1).allocation.is_alloc(), "");
- const auto& sym_alloc = args.at(1).allocation.alloc().get_relocation(0);
-
- // TODO: Ensure that first arg is a FFI pointer with offset+size of zero
- void* handle = handle_alloc.ffi().ptr_value;
- // TODO: Get either a FFI data pointer, or a inner data pointer
- const void* symname = sym_alloc.alloc().data_ptr();
- // TODO: Sanity check that it's a valid c string within its allocation
- LOG_DEBUG("FFI GetProcAddress(" << handle << ", \"" << static_cast<const char*>(symname) << "\")");
-
- auto rv = GetProcAddress(static_cast<HMODULE>(handle), static_cast<LPCSTR>(symname));
-
- if( rv )
- {
- return Value::new_ffiptr(FFIPointer { "GetProcAddress", rv });
- }
- else
+ args.push_back(::std::move(val_argc));
+ args.push_back(::std::move(val_argv));
+ Value rv;
+ root_thread.start(tree.find_lang_item("start"), ::std::move(args));
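+    // Drive the interpreter: step the root thread until `step_one` reports completion,
+    // at which point `rv` holds the program's return value.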
+ while( !root_thread.step_one(rv) )
{
- auto rv = Value(::HIR::TypeRef(RawType::USize));
- rv.create_allocation();
- rv.write_usize(0,0);
- return rv;
- }
- }
-#endif
- // Allocators!
- else
- {
- LOG_TODO("Call external function " << link_name);
- }
- throw "";
-}
-Value MIRI_Invoke_Intrinsic(ModuleTree& modtree, const ::std::string& name, const ::HIR::PathParams& ty_params, ::std::vector<Value> args)
-{
- Value rv;
- TRACE_FUNCTION_R(name, rv);
- for(const auto& a : args)
- LOG_DEBUG("#" << (&a - args.data()) << ": " << a);
- if( name == "atomic_store" )
- {
- auto& ptr_val = args.at(0);
- auto& data_val = args.at(1);
-
- LOG_ASSERT(ptr_val.size() == POINTER_SIZE, "atomic_store of a value that isn't a pointer-sized value");
-
- // There MUST be a relocation at this point with a valid allocation.
- LOG_ASSERT(ptr_val.allocation, "Deref of a value with no allocation (hence no relocations)");
- LOG_TRACE("Deref " << ptr_val.allocation.alloc());
- auto alloc = ptr_val.allocation.alloc().get_relocation(0);
- LOG_ASSERT(alloc, "Deref of a value with no relocation");
-
- // TODO: Atomic side of this?
- size_t ofs = ptr_val.read_usize(0);
- const auto& ty = ty_params.tys.at(0);
- alloc.alloc().write_value(ofs, ::std::move(data_val));
- }
- else if( name == "atomic_load" )
- {
- auto& ptr_val = args.at(0);
- LOG_ASSERT(ptr_val.size() == POINTER_SIZE, "atomic_store of a value that isn't a pointer-sized value");
-
- // There MUST be a relocation at this point with a valid allocation.
- LOG_ASSERT(ptr_val.allocation, "Deref of a value with no allocation (hence no relocations)");
- LOG_TRACE("Deref " << ptr_val.allocation.alloc());
- auto alloc = ptr_val.allocation.alloc().get_relocation(0);
- LOG_ASSERT(alloc, "Deref of a value with no relocation");
-
- // TODO: Atomic side of this?
- size_t ofs = ptr_val.read_usize(0);
- const auto& ty = ty_params.tys.at(0);
- rv = alloc.alloc().read_value(ofs, ty.get_size());
- }
- else if( name == "transmute" )
- {
- // Transmute requires the same size, so just copying the value works
- rv = ::std::move(args.at(0));
- }
- else if( name == "assume" )
- {
- // Assume is a no-op which returns unit
- }
- else if( name == "offset" )
- {
- auto ptr_val = ::std::move(args.at(0));
- auto& ofs_val = args.at(1);
-
- auto r = ptr_val.allocation.alloc().get_relocation(0);
- auto orig_ofs = ptr_val.read_usize(0);
- auto delta_counts = ofs_val.read_usize(0);
- auto new_ofs = orig_ofs + delta_counts * ty_params.tys.at(0).get_size();
- if(POINTER_SIZE != 8) {
- new_ofs &= 0xFFFFFFFF;
}
- ptr_val.write_usize(0, new_ofs);
- ptr_val.allocation.alloc().relocations.push_back({ 0, r });
- rv = ::std::move(ptr_val);
- }
- // effectively ptr::write
- else if( name == "move_val_init" )
- {
- auto& ptr_val = args.at(0);
- auto& data_val = args.at(1);
-
- LOG_ASSERT(ptr_val.size() == POINTER_SIZE, "move_val_init of an address that isn't a pointer-sized value");
-
- // There MUST be a relocation at this point with a valid allocation.
- LOG_ASSERT(ptr_val.allocation, "Deref of a value with no allocation (hence no relocations)");
- LOG_TRACE("Deref " << ptr_val << " and store " << data_val);
- auto alloc = ptr_val.allocation.alloc().get_relocation(0);
- LOG_ASSERT(alloc, "Deref of a value with no relocation");
-
- size_t ofs = ptr_val.read_usize(0);
- const auto& ty = ty_params.tys.at(0);
- alloc.alloc().write_value(ofs, ::std::move(data_val));
- LOG_DEBUG(alloc.alloc());
- }
- else if( name == "uninit" )
- {
- rv = Value(ty_params.tys.at(0));
+ LOG_NOTICE("Return code: " << rv);
}
- // - Unsized stuff
- else if( name == "size_of_val" )
+ catch(const DebugExceptionTodo& /*e*/)
{
- auto& val = args.at(0);
- const auto& ty = ty_params.tys.at(0);
- rv = Value(::HIR::TypeRef(RawType::USize));
- // Get unsized type somehow.
- // - _HAS_ to be the last type, so that makes it easier
- size_t fixed_size = 0;
- if( const auto* ity = ty.get_usized_type(fixed_size) )
- {
- const auto& meta_ty = *ty.get_meta_type();
- LOG_DEBUG("size_of_val - " << ty << " ity=" << *ity << " meta_ty=" << meta_ty << " fixed_size=" << fixed_size);
- size_t flex_size = 0;
- if( !ity->wrappers.empty() )
- {
- LOG_ASSERT(ity->wrappers[0].type == TypeWrapper::Ty::Slice, "");
- size_t item_size = ity->get_inner().get_size();
- size_t item_count = val.read_usize(POINTER_SIZE);
- flex_size = item_count * item_size;
- LOG_DEBUG("> item_size=" << item_size << " item_count=" << item_count << " flex_size=" << flex_size);
- }
- else if( ity->inner_type == RawType::Str )
- {
- flex_size = val.read_usize(POINTER_SIZE);
- }
- else if( ity->inner_type == RawType::TraitObject )
- {
- LOG_TODO("size_of_val - Trait Object - " << ty);
- }
- else
- {
- LOG_BUG("Inner unsized type unknown - " << *ity);
- }
-
- rv.write_usize(0, fixed_size + flex_size);
- }
- else
+ ::std::cerr << "TODO Hit" << ::std::endl;
+ if(opts.logfile != "")
{
- rv.write_usize(0, ty.get_size());
+ ::std::cerr << "- See '" << opts.logfile << "' for details" << ::std::endl;
}
+ return 1;
}
- else if( name == "drop_in_place" )
+ catch(const DebugExceptionError& /*e*/)
{
- auto& val = args.at(0);
- const auto& ty = ty_params.tys.at(0);
- if( !ty.wrappers.empty() )
- {
- size_t item_count = 0;
- switch(ty.wrappers[0].type)
- {
- case TypeWrapper::Ty::Slice:
- case TypeWrapper::Ty::Array:
- item_count = (ty.wrappers[0].type == TypeWrapper::Ty::Slice ? val.read_usize(POINTER_SIZE) : ty.wrappers[0].size);
- break;
- case TypeWrapper::Ty::Pointer:
- break;
- case TypeWrapper::Ty::Borrow:
- break;
- }
- LOG_ASSERT(ty.wrappers[0].type == TypeWrapper::Ty::Slice, "drop_in_place should only exist for slices - " << ty);
- const auto& ity = ty.get_inner();
- size_t item_size = ity.get_size();
-
-            auto ptr = val.read_value(0, POINTER_SIZE);
- for(size_t i = 0; i < item_count; i ++)
- {
- drop_value(modtree, ptr, ity);
- ptr.write_usize(0, ptr.read_usize(0) + item_size);
- }
- }
- else
+ ::std::cerr << "Error encountered" << ::std::endl;
+ if(opts.logfile != "")
{
- LOG_TODO("drop_in_place - " << ty);
+ ::std::cerr << "- See '" << opts.logfile << "' for details" << ::std::endl;
}
+ return 1;
}
- // ----------------------------------------------------------------
-    // Checked arithmetic
- else if( name == "add_with_overflow" )
- {
- const auto& ty = ty_params.tys.at(0);
-
- auto lhs = PrimitiveValueVirt::from_value(ty, args.at(0));
- auto rhs = PrimitiveValueVirt::from_value(ty, args.at(1));
- bool didnt_overflow = lhs.get().add( rhs.get() );
-
- // Get return type - a tuple of `(T, bool,)`
- ::HIR::GenericPath gp;
- gp.m_params.tys.push_back(ty);
- gp.m_params.tys.push_back(::HIR::TypeRef { RawType::Bool });
- const auto& dty = modtree.get_composite(gp);
-
- rv = Value(::HIR::TypeRef(&dty));
- lhs.get().write_to_value(rv, dty.fields[0].first);
- rv.write_u8( dty.fields[1].first, didnt_overflow ? 0 : 1 ); // Returns true if overflow happened
- }
- else if( name == "sub_with_overflow" )
- {
- const auto& ty = ty_params.tys.at(0);
-
- auto lhs = PrimitiveValueVirt::from_value(ty, args.at(0));
- auto rhs = PrimitiveValueVirt::from_value(ty, args.at(1));
- bool didnt_overflow = lhs.get().subtract( rhs.get() );
-
- // Get return type - a tuple of `(T, bool,)`
- ::HIR::GenericPath gp;
- gp.m_params.tys.push_back(ty);
- gp.m_params.tys.push_back(::HIR::TypeRef { RawType::Bool });
- const auto& dty = modtree.get_composite(gp);
-
- rv = Value(::HIR::TypeRef(&dty));
- lhs.get().write_to_value(rv, dty.fields[0].first);
- rv.write_u8( dty.fields[1].first, didnt_overflow ? 0 : 1 ); // Returns true if overflow happened
- }
- else if( name == "mul_with_overflow" )
- {
- const auto& ty = ty_params.tys.at(0);
-
- auto lhs = PrimitiveValueVirt::from_value(ty, args.at(0));
- auto rhs = PrimitiveValueVirt::from_value(ty, args.at(1));
- bool didnt_overflow = lhs.get().multiply( rhs.get() );
-
- // Get return type - a tuple of `(T, bool,)`
- ::HIR::GenericPath gp;
- gp.m_params.tys.push_back(ty);
- gp.m_params.tys.push_back(::HIR::TypeRef { RawType::Bool });
- const auto& dty = modtree.get_composite(gp);
-
- rv = Value(::HIR::TypeRef(&dty));
- lhs.get().write_to_value(rv, dty.fields[0].first);
- rv.write_u8( dty.fields[1].first, didnt_overflow ? 0 : 1 ); // Returns true if overflow happened
- }
-    // Overflowing arithmetic
- else if( name == "overflowing_sub" )
- {
- const auto& ty = ty_params.tys.at(0);
-
- auto lhs = PrimitiveValueVirt::from_value(ty, args.at(0));
- auto rhs = PrimitiveValueVirt::from_value(ty, args.at(1));
- lhs.get().subtract( rhs.get() );
-
- rv = Value(ty);
- lhs.get().write_to_value(rv, 0);
- }
- // ----------------------------------------------------------------
- // memcpy
- else if( name == "copy_nonoverlapping" )
- {
- auto src_ofs = args.at(0).read_usize(0);
- auto src_alloc = args.at(0).allocation.alloc().get_relocation(0);
- auto dst_ofs = args.at(1).read_usize(0);
- auto dst_alloc = args.at(1).allocation.alloc().get_relocation(0);
- size_t ent_count = args.at(2).read_usize(0);
- size_t ent_size = ty_params.tys.at(0).get_size();
- auto byte_count = ent_count * ent_size;
-
- LOG_ASSERT(src_alloc, "Source of copy* must have an allocation");
- LOG_ASSERT(dst_alloc, "Destination of copy* must be a memory allocation");
- LOG_ASSERT(dst_alloc.is_alloc(), "Destination of copy* must be a memory allocation");
- switch(src_alloc.get_ty())
- {
- case AllocationPtr::Ty::Allocation: {
- auto v = src_alloc.alloc().read_value(src_ofs, byte_count);
- dst_alloc.alloc().write_value(dst_ofs, ::std::move(v));
- } break;
- case AllocationPtr::Ty::StdString:
- LOG_ASSERT(src_ofs <= src_alloc.str().size(), "");
- LOG_ASSERT(byte_count <= src_alloc.str().size(), "");
- LOG_ASSERT(src_ofs + byte_count <= src_alloc.str().size(), "");
- dst_alloc.alloc().write_bytes(dst_ofs, src_alloc.str().data() + src_ofs, byte_count);
- break;
- case AllocationPtr::Ty::Function:
- LOG_FATAL("Attempt to copy* a function");
- break;
- case AllocationPtr::Ty::FfiPointer:
- LOG_BUG("Trying to copy from a FFI pointer");
- break;
- }
- }
- else
- {
- LOG_TODO("Call intrinsic \"" << name << "\"");
- }
- return rv;
+ return 0;
}
int ProgramOptions::parse(int argc, const char* argv[])
{
bool all_free = false;
+ // TODO: use getopt? POSIX only
for(int argidx = 1; argidx < argc; argidx ++)
{
const char* arg = argv[argidx];
if( arg[0] != '-' || all_free )
{
- // Free
+ // Free arguments
+ // - First is the input file
if( this->infile == "" )
{
this->infile = arg;
}
else
{
- // TODO: Too many free arguments
+            // Any subsequent arguments are passed to the target
+ this->args.push_back(arg);
}
}
else if( arg[1] != '-' )
{
- // Short
+ // Short arguments
+ if( arg[2] != '\0' ) {
+            // Grouped short options (e.g. "-ab") aren't supported
+ ::std::cerr << "Unexpected option " << arg << ::std::endl;
+ return 1;
+ }
+ switch(arg[1])
+ {
+ case 'h':
+ this->show_help(argv[0]);
+ exit(0);
+ default:
+ ::std::cerr << "Unexpected option -" << arg[1] << ::std::endl;
+ return 1;
+ }
}
else if( arg[2] != '\0' )
{
// Long
+ if( ::std::strcmp(arg, "--help") == 0 ) {
+ this->show_help(argv[0]);
+ exit(0);
+ }
+ else if( ::std::strcmp(arg, "--logfile") == 0 ) {
+ if( argidx + 1 == argc ) {
+ ::std::cerr << "Option " << arg << " requires an argument" << ::std::endl;
+ return 1;
+ }
+ const char* opt = argv[++argidx];
+ this->logfile = opt;
+ }
+ //else if( ::std::strcmp(arg, "--api") == 0 ) {
+ //}
+ else {
+ ::std::cerr << "Unexpected option " << arg << ::std::endl;
+ return 1;
+ }
}
else
{
@@ -1948,3 +192,8 @@ int ProgramOptions::parse(int argc, const char* argv[])
}
return 0;
}
+
+void ProgramOptions::show_help(const char* prog) const
+{
+ ::std::cout << "USAGE: " << prog << " <infile> <... args>" << ::std::endl;
+}
diff --git a/tools/standalone_miri/mir.cpp b/tools/standalone_miri/mir.cpp
index 4593fb44..a0601823 100644
--- a/tools/standalone_miri/mir.cpp
+++ b/tools/standalone_miri/mir.cpp
@@ -7,6 +7,7 @@
*/
#include "../../src/mir/mir.hpp"
#include "hir_sim.hpp"
+#include <iostream>
namespace std {
template <typename T>
diff --git a/tools/standalone_miri/miri.cpp b/tools/standalone_miri/miri.cpp
new file mode 100644
index 00000000..f4179b5d
--- /dev/null
+++ b/tools/standalone_miri/miri.cpp
@@ -0,0 +1,2305 @@
+/*
+ * mrustc Standalone MIRI
+ * - by John Hodge (Mutabah)
+ *
+ * miri.cpp
+ * - Interpreter core
+ */
+#include <iostream>
+#include "module_tree.hpp"
+#include "value.hpp"
+#include <algorithm>
+#include <iomanip>
+#include "debug.hpp"
+#include "miri.hpp"
+#ifdef _WIN32
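+// NOMINMAX keeps <Windows.h> from defining min()/max() macros that would clash with ::std::min/::std::max.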
+# define NOMINMAX
+# include <Windows.h>
+#endif
+
+unsigned ThreadState::s_next_tls_key = 1;
+
+class PrimitiveValue
+{
+public:
+ virtual ~PrimitiveValue() {}
+
+ virtual bool add(const PrimitiveValue& v) = 0;
+ virtual bool subtract(const PrimitiveValue& v) = 0;
+ virtual bool multiply(const PrimitiveValue& v) = 0;
+ virtual bool divide(const PrimitiveValue& v) = 0;
+ virtual bool modulo(const PrimitiveValue& v) = 0;
+ virtual void write_to_value(ValueCommonWrite& tgt, size_t ofs) const = 0;
+
+ template<typename T>
+ const T& check(const char* opname) const
+ {
+ const auto* xp = dynamic_cast<const T*>(this);
+ LOG_ASSERT(xp, "Attempting to " << opname << " mismatched types, expected " << typeid(T).name() << " got " << typeid(*this).name());
+ return *xp;
+ }
+};
+template<typename T>
+struct PrimitiveUInt:
+ public PrimitiveValue
+{
+ typedef PrimitiveUInt<T> Self;
+ T v;
+
+ PrimitiveUInt(T v): v(v) {}
+ ~PrimitiveUInt() override {}
+
+ bool add(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("add");
+ T newv = this->v + xp->v;
+ bool did_overflow = newv < this->v;
+ this->v = newv;
+ return !did_overflow;
+ }
+ bool subtract(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("subtract");
+ T newv = this->v - xp->v;
+ bool did_overflow = newv > this->v;
+ this->v = newv;
+ return !did_overflow;
+ }
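+    // NOTE: The overflow check in multiply() below is only a heuristic wrap-around test;
+    // a wrapped product is not guaranteed to compare less than both operands.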
+ bool multiply(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("multiply");
+ T newv = this->v * xp->v;
+ bool did_overflow = newv < this->v && newv < xp->v;
+ this->v = newv;
+ return !did_overflow;
+ }
+ bool divide(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("divide");
+ if(xp->v == 0) return false;
+ T newv = this->v / xp->v;
+ this->v = newv;
+ return true;
+ }
+ bool modulo(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("modulo");
+ if(xp->v == 0) return false;
+ T newv = this->v % xp->v;
+ this->v = newv;
+ return true;
+ }
+};
+struct PrimitiveU64: public PrimitiveUInt<uint64_t>
+{
+ PrimitiveU64(uint64_t v): PrimitiveUInt(v) {}
+ void write_to_value(ValueCommonWrite& tgt, size_t ofs) const override {
+ tgt.write_u64(ofs, this->v);
+ }
+};
+struct PrimitiveU32: public PrimitiveUInt<uint32_t>
+{
+ PrimitiveU32(uint32_t v): PrimitiveUInt(v) {}
+ void write_to_value(ValueCommonWrite& tgt, size_t ofs) const override {
+ tgt.write_u32(ofs, this->v);
+ }
+};
+template<typename T>
+struct PrimitiveSInt:
+ public PrimitiveValue
+{
+ typedef PrimitiveSInt<T> Self;
+ T v;
+
+ PrimitiveSInt(T v): v(v) {}
+ ~PrimitiveSInt() override {}
+
+ // TODO: Make this correct.
+ bool add(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("add");
+ T newv = this->v + xp->v;
+ bool did_overflow = newv < this->v;
+ this->v = newv;
+ return !did_overflow;
+ }
+ bool subtract(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("subtract");
+ T newv = this->v - xp->v;
+ bool did_overflow = newv > this->v;
+ this->v = newv;
+ return !did_overflow;
+ }
+ bool multiply(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("multiply");
+ T newv = this->v * xp->v;
+ bool did_overflow = newv < this->v && newv < xp->v;
+ this->v = newv;
+ return !did_overflow;
+ }
+ bool divide(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("divide");
+ if(xp->v == 0) return false;
+ T newv = this->v / xp->v;
+ this->v = newv;
+ return true;
+ }
+ bool modulo(const PrimitiveValue& x) override {
+ const auto* xp = &x.check<Self>("modulo");
+ if(xp->v == 0) return false;
+ T newv = this->v % xp->v;
+ this->v = newv;
+ return true;
+ }
+};
+struct PrimitiveI64: public PrimitiveSInt<int64_t>
+{
+ PrimitiveI64(int64_t v): PrimitiveSInt(v) {}
+ void write_to_value(ValueCommonWrite& tgt, size_t ofs) const override {
+ tgt.write_i64(ofs, this->v);
+ }
+};
+struct PrimitiveI32: public PrimitiveSInt<int32_t>
+{
+ PrimitiveI32(int32_t v): PrimitiveSInt(v) {}
+ void write_to_value(ValueCommonWrite& tgt, size_t ofs) const override {
+ tgt.write_i32(ofs, this->v);
+ }
+};
+
+class PrimitiveValueVirt
+{
+ uint64_t buf[3]; // Allows i128 plus a vtable pointer
+ PrimitiveValueVirt() {}
+public:
+ // HACK: No copy/move constructors, assumes that contained data is always POD
+ ~PrimitiveValueVirt() {
+ reinterpret_cast<PrimitiveValue*>(&this->buf)->~PrimitiveValue();
+ }
+ PrimitiveValue& get() { return *reinterpret_cast<PrimitiveValue*>(&this->buf); }
+ const PrimitiveValue& get() const { return *reinterpret_cast<const PrimitiveValue*>(&this->buf); }
+
+ static PrimitiveValueVirt from_value(const ::HIR::TypeRef& t, const ValueRef& v) {
+ PrimitiveValueVirt rv;
+ LOG_ASSERT(t.get_wrapper() == nullptr, "PrimitiveValueVirt::from_value: " << t);
+ switch(t.inner_type)
+ {
+ case RawType::U32:
+ new(&rv.buf) PrimitiveU32(v.read_u32(0));
+ break;
+ case RawType::U64:
+ new(&rv.buf) PrimitiveU64(v.read_u64(0));
+ break;
+ case RawType::USize:
+ if( POINTER_SIZE == 8 )
+ new(&rv.buf) PrimitiveU64(v.read_u64(0));
+ else
+ new(&rv.buf) PrimitiveU32(v.read_u32(0));
+ break;
+
+ case RawType::I32:
+ new(&rv.buf) PrimitiveI32(v.read_i32(0));
+ break;
+ case RawType::I64:
+ new(&rv.buf) PrimitiveI64(v.read_i64(0));
+ break;
+ case RawType::ISize:
+ if( POINTER_SIZE == 8 )
+ new(&rv.buf) PrimitiveI64(v.read_i64(0));
+ else
+ new(&rv.buf) PrimitiveI32(v.read_i32(0));
+ break;
+ default:
+ LOG_TODO("PrimitiveValueVirt::from_value: " << t);
+ }
+ return rv;
+ }
+};
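+// Usage sketch (illustrative variable names), mirroring how the binary-op and
+// *_with_overflow handlers use this wrapper:
+//   auto lhs = PrimitiveValueVirt::from_value(ty, v_l);
+//   auto rhs = PrimitiveValueVirt::from_value(ty, v_r);
+//   bool ok = lhs.get().add( rhs.get() );   // `false` means overflow (or a zero divisor for divide/modulo)
+//   lhs.get().write_to_value(dst_val, 0);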
+
+struct Ops {
+ template<typename T>
+ static int do_compare(T l, T r) {
+ if( l == r ) {
+ return 0;
+ }
+ else if( !(l != r) ) {
+ // Special return value for NaN w/ NaN
+ return 2;
+ }
+ else if( l < r ) {
+ return -1;
+ }
+ else {
+ return 1;
+ }
+ }
+ template<typename T>
+ static T do_bitwise(T l, T r, ::MIR::eBinOp op) {
+ switch(op)
+ {
+ case ::MIR::eBinOp::BIT_AND: return l & r;
+ case ::MIR::eBinOp::BIT_OR: return l | r;
+ case ::MIR::eBinOp::BIT_XOR: return l ^ r;
+ case ::MIR::eBinOp::BIT_SHL: return l << r;
+ case ::MIR::eBinOp::BIT_SHR: return l >> r;
+ default:
+ LOG_BUG("Unexpected operation in Ops::do_bitwise");
+ }
+ }
+};
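+// `do_compare` follows a strcmp-style convention: 0 equal, -1 less, 1 greater (2 is the
+// special unordered/NaN result noted above); callers map that onto the EQ/NE/LT/... results.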
+
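+// MirHelpers resolves MIR lvalues, constants and parameters against the current stack
+// frame, yielding `ValueRef` views (backing allocation + offset + size) plus the HIR type,
+// and provides the read/write primitives that the statement/terminator handlers build on.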
+struct MirHelpers
+{
+ InterpreterThread& thread;
+ InterpreterThread::StackFrame& frame;
+
+ MirHelpers(InterpreterThread& thread, InterpreterThread::StackFrame& frame):
+ thread(thread),
+ frame(frame)
+ {
+ }
+
+ ValueRef get_value_and_type(const ::MIR::LValue& lv, ::HIR::TypeRef& ty)
+ {
+ switch(lv.tag())
+ {
+ case ::MIR::LValue::TAGDEAD: throw "";
+ // --> Slots
+ TU_ARM(lv, Return, _e) {
+ ty = this->frame.fcn.ret_ty;
+ return ValueRef(this->frame.ret);
+ } break;
+ TU_ARM(lv, Local, e) {
+ ty = this->frame.fcn.m_mir.locals.at(e);
+ return ValueRef(this->frame.locals.at(e));
+ } break;
+ TU_ARM(lv, Argument, e) {
+ ty = this->frame.fcn.args.at(e.idx);
+ return ValueRef(this->frame.args.at(e.idx));
+ } break;
+ TU_ARM(lv, Static, e) {
+ /*const*/ auto& s = this->thread.m_modtree.get_static(e);
+ ty = s.ty;
+ return ValueRef(s.val);
+ } break;
+ // --> Modifiers
+ TU_ARM(lv, Index, e) {
+ auto idx = get_value_ref(*e.idx).read_usize(0);
+ ::HIR::TypeRef array_ty;
+ auto base_val = get_value_and_type(*e.val, array_ty);
+ const auto* wrapper = array_ty.get_wrapper();
+ if( !wrapper )
+ {
+ LOG_ERROR("Indexing non-array/slice - " << array_ty);
+ }
+ else if( wrapper->type == TypeWrapper::Ty::Array )
+ {
+ ty = array_ty.get_inner();
+ base_val.m_offset += ty.get_size() * idx;
+ return base_val;
+ }
+ else if( wrapper->type == TypeWrapper::Ty::Slice )
+ {
+ LOG_TODO("Slice index");
+ }
+ else
+ {
+ LOG_ERROR("Indexing non-array/slice - " << array_ty);
+ throw "ERROR";
+ }
+ } break;
+ TU_ARM(lv, Field, e) {
+ ::HIR::TypeRef composite_ty;
+ auto base_val = get_value_and_type(*e.val, composite_ty);
+ // TODO: if there's metadata present in the base, but the inner doesn't have metadata, clear the metadata
+ size_t inner_ofs;
+ ty = composite_ty.get_field(e.field_index, inner_ofs);
+ LOG_DEBUG("Field - " << composite_ty << "#" << e.field_index << " = @" << inner_ofs << " " << ty);
+ base_val.m_offset += inner_ofs;
+ if( ty.get_meta_type() == HIR::TypeRef(RawType::Unreachable) )
+ {
+ LOG_ASSERT(base_val.m_size >= ty.get_size(), "Field didn't fit in the value - " << ty.get_size() << " required, but " << base_val.m_size << " avail");
+ base_val.m_size = ty.get_size();
+ }
+ return base_val;
+ }
+ TU_ARM(lv, Downcast, e) {
+ ::HIR::TypeRef composite_ty;
+ auto base_val = get_value_and_type(*e.val, composite_ty);
+ LOG_DEBUG("Downcast - " << composite_ty);
+
+ size_t inner_ofs;
+ ty = composite_ty.get_field(e.variant_index, inner_ofs);
+ base_val.m_offset += inner_ofs;
+ return base_val;
+ }
+ TU_ARM(lv, Deref, e) {
+ ::HIR::TypeRef ptr_ty;
+ auto val = get_value_and_type(*e.val, ptr_ty);
+ ty = ptr_ty.get_inner();
+ LOG_DEBUG("val = " << val << ", (inner) ty=" << ty);
+
+ LOG_ASSERT(val.m_size >= POINTER_SIZE, "Deref of a value that doesn't fit a pointer - " << ty);
+ size_t ofs = val.read_usize(0);
+
+ // There MUST be a relocation at this point with a valid allocation.
+ auto alloc = val.get_relocation(val.m_offset);
+ LOG_TRACE("Deref " << alloc << " + " << ofs << " to give value of type " << ty);
+            // NOTE: There may be no allocation when dereferencing a pointer to a zero-sized type
+ if( alloc.is_alloc() )
+ {
+ LOG_DEBUG("> " << lv << " alloc=" << alloc.alloc());
+ }
+ size_t size;
+
+ const auto meta_ty = ty.get_meta_type();
+ ::std::shared_ptr<Value> meta_val;
+ // If the type has metadata, store it.
+ if( meta_ty != RawType::Unreachable )
+ {
+ auto meta_size = meta_ty.get_size();
+ LOG_ASSERT(val.m_size == POINTER_SIZE + meta_size, "Deref of " << ty << ", but pointer isn't correct size");
+ meta_val = ::std::make_shared<Value>( val.read_value(POINTER_SIZE, meta_size) );
+
+ size_t slice_inner_size;
+ if( ty.has_slice_meta(slice_inner_size) ) {
+ size = (ty.get_wrapper() == nullptr ? ty.get_size() : 0) + meta_val->read_usize(0) * slice_inner_size;
+ }
+ //else if( ty == RawType::TraitObject) {
+ // // NOTE: Getting the size from the allocation is semi-valid, as you can't sub-slice trait objects
+ // size = alloc.get_size() - ofs;
+ //}
+ else {
+ LOG_DEBUG("> Meta " << *meta_val << ", size = " << alloc.get_size() << " - " << ofs);
+ size = alloc.get_size() - ofs;
+ }
+ }
+ else
+ {
+ LOG_ASSERT(val.m_size == POINTER_SIZE, "Deref of a value that isn't a pointer-sized value (size=" << val.m_size << ") - " << val << ": " << ptr_ty);
+ size = ty.get_size();
+ if( !alloc ) {
+ LOG_ERROR("Deref of a value with no relocation - " << val);
+ }
+ }
+
+ LOG_DEBUG("alloc=" << alloc << ", ofs=" << ofs << ", size=" << size);
+ auto rv = ValueRef(::std::move(alloc), ofs, size);
+ rv.m_metadata = ::std::move(meta_val);
+ return rv;
+ } break;
+ }
+ throw "";
+ }
+ ValueRef get_value_ref(const ::MIR::LValue& lv)
+ {
+ ::HIR::TypeRef tmp;
+ return get_value_and_type(lv, tmp);
+ }
+
+ ::HIR::TypeRef get_lvalue_ty(const ::MIR::LValue& lv)
+ {
+ ::HIR::TypeRef ty;
+ get_value_and_type(lv, ty);
+ return ty;
+ }
+
+ Value read_lvalue_with_ty(const ::MIR::LValue& lv, ::HIR::TypeRef& ty)
+ {
+ auto base_value = get_value_and_type(lv, ty);
+
+ return base_value.read_value(0, ty.get_size());
+ }
+ Value read_lvalue(const ::MIR::LValue& lv)
+ {
+ ::HIR::TypeRef ty;
+ return read_lvalue_with_ty(lv, ty);
+ }
+ void write_lvalue(const ::MIR::LValue& lv, Value val)
+ {
+ // TODO: Ensure that target is writable? Or should write_value do that?
+ //LOG_DEBUG(lv << " = " << val);
+ ::HIR::TypeRef ty;
+ auto base_value = get_value_and_type(lv, ty);
+
+ if(base_value.m_alloc) {
+ base_value.m_alloc.alloc().write_value(base_value.m_offset, ::std::move(val));
+ }
+ else {
+ base_value.m_value->write_value(base_value.m_offset, ::std::move(val));
+ }
+ }
+
+ Value const_to_value(const ::MIR::Constant& c, ::HIR::TypeRef& ty)
+ {
+ switch(c.tag())
+ {
+ case ::MIR::Constant::TAGDEAD: throw "";
+ TU_ARM(c, Int, ce) {
+ ty = ::HIR::TypeRef(ce.t);
+ Value val = Value(ty);
+ val.write_bytes(0, &ce.v, ::std::min(ty.get_size(), sizeof(ce.v))); // TODO: Endian
+ // TODO: If the write was clipped, sign-extend
+ // TODO: i128/u128 need the upper bytes cleared+valid
+ return val;
+ } break;
+ TU_ARM(c, Uint, ce) {
+ ty = ::HIR::TypeRef(ce.t);
+ Value val = Value(ty);
+ val.write_bytes(0, &ce.v, ::std::min(ty.get_size(), sizeof(ce.v))); // TODO: Endian
+ // TODO: i128/u128 need the upper bytes cleared+valid
+ return val;
+ } break;
+ TU_ARM(c, Bool, ce) {
+ Value val = Value(::HIR::TypeRef { RawType::Bool });
+ val.write_bytes(0, &ce.v, 1);
+ return val;
+ } break;
+ TU_ARM(c, Float, ce) {
+ ty = ::HIR::TypeRef(ce.t);
+ Value val = Value(ty);
+ if( ce.t.raw_type == RawType::F64 ) {
+ val.write_bytes(0, &ce.v, ::std::min(ty.get_size(), sizeof(ce.v))); // TODO: Endian/format?
+ }
+ else if( ce.t.raw_type == RawType::F32 ) {
+ float v = static_cast<float>(ce.v);
+ val.write_bytes(0, &v, ::std::min(ty.get_size(), sizeof(v))); // TODO: Endian/format?
+ }
+ else {
+ throw ::std::runtime_error("BUG: Invalid type in Constant::Float");
+ }
+ return val;
+ } break;
+ TU_ARM(c, Const, ce) {
+ LOG_BUG("Constant::Const in mmir");
+ } break;
+ TU_ARM(c, Bytes, ce) {
+ LOG_TODO("Constant::Bytes");
+ } break;
+ TU_ARM(c, StaticString, ce) {
+ ty = ::HIR::TypeRef(RawType::Str).wrap(TypeWrapper::Ty::Borrow, 0);
+ Value val = Value(ty);
+ val.write_ptr(0, 0, RelocationPtr::new_string(&ce));
+ val.write_usize(POINTER_SIZE, ce.size());
+ LOG_DEBUG(c << " = " << val);
+ return val;
+ } break;
+ // --> Accessor
+ TU_ARM(c, ItemAddr, ce) {
+ // Create a value with a special backing allocation of zero size that references the specified item.
+ if( /*const auto* fn =*/ this->thread.m_modtree.get_function_opt(ce) ) {
+ ty = ::HIR::TypeRef(RawType::Function);
+ return Value::new_fnptr(ce);
+ }
+ if( const auto* s = this->thread.m_modtree.get_static_opt(ce) ) {
+ ty = s->ty.wrapped(TypeWrapper::Ty::Borrow, 0);
+ return Value::new_pointer(ty, 0, RelocationPtr::new_alloc(s->val.allocation));
+ }
+ LOG_ERROR("Constant::ItemAddr - " << ce << " - not found");
+ } break;
+ }
+ throw "";
+ }
+ Value const_to_value(const ::MIR::Constant& c)
+ {
+ ::HIR::TypeRef ty;
+ return const_to_value(c, ty);
+ }
+ Value param_to_value(const ::MIR::Param& p, ::HIR::TypeRef& ty)
+ {
+ switch(p.tag())
+ {
+ case ::MIR::Param::TAGDEAD: throw "";
+ TU_ARM(p, Constant, pe)
+ return const_to_value(pe, ty);
+ TU_ARM(p, LValue, pe)
+ return read_lvalue_with_ty(pe, ty);
+ }
+ throw "";
+ }
+ Value param_to_value(const ::MIR::Param& p)
+ {
+ ::HIR::TypeRef ty;
+ return param_to_value(p, ty);
+ }
+
+ ValueRef get_value_ref_param(const ::MIR::Param& p, Value& tmp, ::HIR::TypeRef& ty)
+ {
+ switch(p.tag())
+ {
+ case ::MIR::Param::TAGDEAD: throw "";
+ TU_ARM(p, Constant, pe)
+ tmp = const_to_value(pe, ty);
+ return ValueRef(tmp, 0, ty.get_size());
+ TU_ARM(p, LValue, pe)
+ return get_value_and_type(pe, ty);
+ }
+ throw "";
+ }
+};
+
+// ====================================================================
+//
+// ====================================================================
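+// The destructor dumps a backtrace of whatever frames are still on the stack, which is
+// mainly useful when an error aborts the interpreter mid-execution.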
+InterpreterThread::~InterpreterThread()
+{
+ for(size_t i = 0; i < m_stack.size(); i++)
+ {
+ const auto& frame = m_stack[m_stack.size() - 1 - i];
+ ::std::cout << "#" << i << ": ";
+ if( frame.cb )
+ {
+ ::std::cout << "WRAPPER";
+ }
+ else
+ {
+ ::std::cout << frame.fcn.my_path << " BB" << frame.bb_idx << "/";
+ if( frame.stmt_idx == frame.fcn.m_mir.blocks.at(frame.bb_idx).statements.size() )
+ ::std::cout << "TERM";
+ else
+ ::std::cout << frame.stmt_idx;
+ }
+ ::std::cout << ::std::endl;
+ }
+}
+void InterpreterThread::start(const ::HIR::Path& p, ::std::vector<Value> args)
+{
+ assert( this->m_stack.empty() );
+ Value v;
+ if( this->call_path(v, p, ::std::move(args)) )
+ {
+ LOG_TODO("Handle immediate return thread entry");
+ }
+}
+bool InterpreterThread::step_one(Value& out_thread_result)
+{
+ assert( !this->m_stack.empty() );
+ assert( !this->m_stack.back().cb );
+ auto& cur_frame = this->m_stack.back();
+ TRACE_FUNCTION_R(cur_frame.fcn.my_path, "");
+ const auto& bb = cur_frame.fcn.m_mir.blocks.at( cur_frame.bb_idx );
+
+ const size_t MAX_STACK_DEPTH = 40;
+ if( this->m_stack.size() > MAX_STACK_DEPTH )
+ {
+ LOG_ERROR("Maximum stack depth of " << MAX_STACK_DEPTH << " exceeded");
+ }
+
+ MirHelpers state { *this, cur_frame };
+
+ if( cur_frame.stmt_idx < bb.statements.size() )
+ {
+ const auto& stmt = bb.statements[cur_frame.stmt_idx];
+ LOG_DEBUG("=== BB" << cur_frame.bb_idx << "/" << cur_frame.stmt_idx << ": " << stmt);
+ switch(stmt.tag())
+ {
+ case ::MIR::Statement::TAGDEAD: throw "";
+ TU_ARM(stmt, Assign, se) {
+ Value new_val;
+ switch(se.src.tag())
+ {
+ case ::MIR::RValue::TAGDEAD: throw "";
+ TU_ARM(se.src, Use, re) {
+ new_val = state.read_lvalue(re);
+ } break;
+ TU_ARM(se.src, Constant, re) {
+ new_val = state.const_to_value(re);
+ } break;
+ TU_ARM(se.src, Borrow, re) {
+ ::HIR::TypeRef src_ty;
+ ValueRef src_base_value = state.get_value_and_type(re.val, src_ty);
+ auto alloc = src_base_value.m_alloc;
+ if( !alloc && src_base_value.m_value )
+ {
+ if( !src_base_value.m_value->allocation )
+ {
+ src_base_value.m_value->create_allocation();
+ }
+ alloc = RelocationPtr::new_alloc( src_base_value.m_value->allocation );
+ }
+ if( alloc.is_alloc() )
+ LOG_DEBUG("- alloc=" << alloc << " (" << alloc.alloc() << ")");
+ else
+ LOG_DEBUG("- alloc=" << alloc);
+ size_t ofs = src_base_value.m_offset;
+ const auto meta = src_ty.get_meta_type();
+ auto dst_ty = src_ty.wrapped(TypeWrapper::Ty::Borrow, static_cast<size_t>(re.type));
+
+ // Create the pointer
+ new_val = Value(dst_ty);
+ new_val.write_ptr(0, ofs, ::std::move(alloc));
+ // - Add metadata if required
+ if( meta != RawType::Unreachable )
+ {
+                LOG_ASSERT(src_base_value.m_metadata, "Borrow of an unsized value, but no metadata available");
+ new_val.write_value(POINTER_SIZE, *src_base_value.m_metadata);
+ }
+ } break;
+ TU_ARM(se.src, Cast, re) {
+ // Determine the type of cast, is it a reinterpret or is it a value transform?
+ // - Float <-> integer is a transform, anything else should be a reinterpret.
+ ::HIR::TypeRef src_ty;
+ auto src_value = state.get_value_and_type(re.val, src_ty);
+
+ new_val = Value(re.type);
+ if( re.type == src_ty )
+ {
+ // No-op cast
+ new_val = src_value.read_value(0, re.type.get_size());
+ }
+ else if( const auto* dst_w = re.type.get_wrapper() )
+ {
+ // Destination can only be a raw pointer
+ if( dst_w->type != TypeWrapper::Ty::Pointer ) {
+ LOG_ERROR("Attempting to cast to a type other than a raw pointer - " << re.type);
+ }
+ if( const auto* src_w = src_ty.get_wrapper() )
+ {
+ // Source can be either
+ if( src_w->type != TypeWrapper::Ty::Pointer && src_w->type != TypeWrapper::Ty::Borrow ) {
+ LOG_ERROR("Attempting to cast to a pointer from a non-pointer - " << src_ty);
+ }
+
+ if( src_ty.get_size() < re.type.get_size() )
+ {
+ LOG_ERROR("Casting to a fatter pointer, " << src_ty << " -> " << re.type);
+ }
+ else
+ {
+ new_val = src_value.read_value(0, re.type.get_size());
+ }
+ }
+ else
+ {
+ if( src_ty == RawType::Function )
+ {
+ }
+ else if( src_ty == RawType::USize )
+ {
+ }
+ else
+ {
+ LOG_ERROR("Trying to cast to pointer (" << re.type <<" ) from invalid type (" << src_ty << ")\n");
+ }
+ new_val = src_value.read_value(0, re.type.get_size());
+ }
+ }
+ else if( const auto* src_w = src_ty.get_wrapper() )
+ {
+ if( src_w->type != TypeWrapper::Ty::Pointer && src_w->type != TypeWrapper::Ty::Borrow ) {
+ LOG_ERROR("Attempting to cast to a non-pointer - " << src_ty);
+ }
+ // TODO: MUST be a thin pointer?
+
+ // TODO: MUST be an integer (usize only?)
+ if( re.type != RawType::USize && re.type != RawType::ISize ) {
+ LOG_ERROR("Casting from a pointer to non-usize - " << re.type << " to " << src_ty);
+ throw "ERROR";
+ }
+ new_val = src_value.read_value(0, re.type.get_size());
+ }
+ else
+ {
+ // TODO: What happens if there's a cast of something with a relocation?
+ switch(re.type.inner_type)
+ {
+ case RawType::Unreachable: throw "BUG";
+ case RawType::Composite:
+ case RawType::TraitObject:
+ case RawType::Function:
+ case RawType::Str:
+ case RawType::Unit:
+ LOG_ERROR("Casting to " << re.type << " is invalid");
+ throw "ERROR";
+ case RawType::F32: {
+ float dst_val = 0.0;
+ // Can be an integer, or F64 (pointer is impossible atm)
+ switch(src_ty.inner_type)
+ {
+ case RawType::Unreachable: throw "BUG";
+ case RawType::Composite: throw "ERROR";
+ case RawType::TraitObject: throw "ERROR";
+ case RawType::Function: throw "ERROR";
+ case RawType::Char: throw "ERROR";
+ case RawType::Str: throw "ERROR";
+ case RawType::Unit: throw "ERROR";
+ case RawType::Bool: throw "ERROR";
+ case RawType::F32: throw "BUG";
+ case RawType::F64: dst_val = static_cast<float>( src_value.read_f64(0) ); break;
+ case RawType::USize: throw "TODO";// /*dst_val = src_value.read_usize();*/ break;
+ case RawType::ISize: throw "TODO";// /*dst_val = src_value.read_isize();*/ break;
+ case RawType::U8: dst_val = static_cast<float>( src_value.read_u8 (0) ); break;
+ case RawType::I8: dst_val = static_cast<float>( src_value.read_i8 (0) ); break;
+ case RawType::U16: dst_val = static_cast<float>( src_value.read_u16(0) ); break;
+ case RawType::I16: dst_val = static_cast<float>( src_value.read_i16(0) ); break;
+ case RawType::U32: dst_val = static_cast<float>( src_value.read_u32(0) ); break;
+ case RawType::I32: dst_val = static_cast<float>( src_value.read_i32(0) ); break;
+ case RawType::U64: dst_val = static_cast<float>( src_value.read_u64(0) ); break;
+ case RawType::I64: dst_val = static_cast<float>( src_value.read_i64(0) ); break;
+ case RawType::U128: throw "TODO";// /*dst_val = src_value.read_u128();*/ break;
+ case RawType::I128: throw "TODO";// /*dst_val = src_value.read_i128();*/ break;
+ }
+ new_val.write_f32(0, dst_val);
+ } break;
+ case RawType::F64: {
+ double dst_val = 0.0;
+ // Can be an integer, or F32 (pointer is impossible atm)
+ switch(src_ty.inner_type)
+ {
+ case RawType::Unreachable: throw "BUG";
+ case RawType::Composite: throw "ERROR";
+ case RawType::TraitObject: throw "ERROR";
+ case RawType::Function: throw "ERROR";
+ case RawType::Char: throw "ERROR";
+ case RawType::Str: throw "ERROR";
+ case RawType::Unit: throw "ERROR";
+ case RawType::Bool: throw "ERROR";
+ case RawType::F64: throw "BUG";
+ case RawType::F32: dst_val = static_cast<double>( src_value.read_f32(0) ); break;
+ case RawType::USize: dst_val = static_cast<double>( src_value.read_usize(0) ); break;
+ case RawType::ISize: dst_val = static_cast<double>( src_value.read_isize(0) ); break;
+ case RawType::U8: dst_val = static_cast<double>( src_value.read_u8 (0) ); break;
+ case RawType::I8: dst_val = static_cast<double>( src_value.read_i8 (0) ); break;
+ case RawType::U16: dst_val = static_cast<double>( src_value.read_u16(0) ); break;
+ case RawType::I16: dst_val = static_cast<double>( src_value.read_i16(0) ); break;
+ case RawType::U32: dst_val = static_cast<double>( src_value.read_u32(0) ); break;
+ case RawType::I32: dst_val = static_cast<double>( src_value.read_i32(0) ); break;
+ case RawType::U64: dst_val = static_cast<double>( src_value.read_u64(0) ); break;
+ case RawType::I64: dst_val = static_cast<double>( src_value.read_i64(0) ); break;
+ case RawType::U128: throw "TODO"; /*dst_val = src_value.read_u128();*/ break;
+ case RawType::I128: throw "TODO"; /*dst_val = src_value.read_i128();*/ break;
+ }
+ new_val.write_f64(0, dst_val);
+ } break;
+ case RawType::Bool:
+ LOG_TODO("Cast to " << re.type);
+ case RawType::Char:
+ LOG_TODO("Cast to " << re.type);
+ case RawType::USize:
+ case RawType::U8:
+ case RawType::U16:
+ case RawType::U32:
+ case RawType::U64:
+ case RawType::ISize:
+ case RawType::I8:
+ case RawType::I16:
+ case RawType::I32:
+ case RawType::I64:
+ {
+ uint64_t dst_val = 0;
+ // Source can be an integer, float, bool, char, fn pointer, or enum tag (pointers are handled above)
+ switch(src_ty.inner_type)
+ {
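+ // Several of the integer cases below use `if(0)` fall-through so that each source type performs only its own read before all paths converge on the shared write-back.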
+ case RawType::Unreachable:
+ LOG_BUG("Casting unreachable");
+ case RawType::TraitObject:
+ case RawType::Str:
+ LOG_FATAL("Cast of unsized type - " << src_ty);
+ case RawType::Function:
+ LOG_ASSERT(re.type.inner_type == RawType::USize, "Function pointers can only be cast to usize, instead " << re.type);
+ new_val = src_value.read_value(0, re.type.get_size());
+ break;
+ case RawType::Char:
+ LOG_ASSERT(re.type.inner_type == RawType::U32, "Char can only be cast to u32, instead " << re.type);
+ new_val = src_value.read_value(0, 4);
+ break;
+ case RawType::Unit:
+ LOG_FATAL("Cast of unit");
+ case RawType::Composite: {
+ const auto& dt = *src_ty.composite_type;
+ if( dt.variants.size() == 0 ) {
+ LOG_FATAL("Cast of composite - " << src_ty);
+ }
+ // TODO: Check that all variants have the same tag offset
+ LOG_ASSERT(dt.fields.size() == 1, "");
+ LOG_ASSERT(dt.fields[0].first == 0, "");
+ for(size_t i = 0; i < dt.variants.size(); i ++ ) {
+ LOG_ASSERT(dt.variants[i].base_field == 0, "");
+ LOG_ASSERT(dt.variants[i].field_path.empty(), "");
+ }
+ ::HIR::TypeRef tag_ty = dt.fields[0].second;
+ LOG_ASSERT(tag_ty.get_wrapper() == nullptr, "");
+ switch(tag_ty.inner_type)
+ {
+ case RawType::USize:
+ dst_val = static_cast<uint64_t>( src_value.read_usize(0) );
+ if(0)
+ case RawType::ISize:
+ dst_val = static_cast<uint64_t>( src_value.read_isize(0) );
+ if(0)
+ case RawType::U8:
+ dst_val = static_cast<uint64_t>( src_value.read_u8 (0) );
+ if(0)
+ case RawType::I8:
+ dst_val = static_cast<uint64_t>( src_value.read_i8 (0) );
+ if(0)
+ case RawType::U16:
+ dst_val = static_cast<uint64_t>( src_value.read_u16(0) );
+ if(0)
+ case RawType::I16:
+ dst_val = static_cast<uint64_t>( src_value.read_i16(0) );
+ if(0)
+ case RawType::U32:
+ dst_val = static_cast<uint64_t>( src_value.read_u32(0) );
+ if(0)
+ case RawType::I32:
+ dst_val = static_cast<uint64_t>( src_value.read_i32(0) );
+ if(0)
+ case RawType::U64:
+ dst_val = static_cast<uint64_t>( src_value.read_u64(0) );
+ if(0)
+ case RawType::I64:
+ dst_val = static_cast<uint64_t>( src_value.read_i64(0) );
+ break;
+ default:
+ LOG_FATAL("Bad tag type in cast - " << tag_ty);
+ }
+ } if(0)
+ case RawType::Bool:
+ dst_val = static_cast<uint64_t>( src_value.read_u8 (0) );
+ if(0)
+ case RawType::F64:
+ dst_val = static_cast<uint64_t>( src_value.read_f64(0) );
+ if(0)
+ case RawType::F32:
+ dst_val = static_cast<uint64_t>( src_value.read_f32(0) );
+ if(0)
+ case RawType::USize:
+ dst_val = static_cast<uint64_t>( src_value.read_usize(0) );
+ if(0)
+ case RawType::ISize:
+ dst_val = static_cast<uint64_t>( src_value.read_isize(0) );
+ if(0)
+ case RawType::U8:
+ dst_val = static_cast<uint64_t>( src_value.read_u8 (0) );
+ if(0)
+ case RawType::I8:
+ dst_val = static_cast<uint64_t>( src_value.read_i8 (0) );
+ if(0)
+ case RawType::U16:
+ dst_val = static_cast<uint64_t>( src_value.read_u16(0) );
+ if(0)
+ case RawType::I16:
+ dst_val = static_cast<uint64_t>( src_value.read_i16(0) );
+ if(0)
+ case RawType::U32:
+ dst_val = static_cast<uint64_t>( src_value.read_u32(0) );
+ if(0)
+ case RawType::I32:
+ dst_val = static_cast<uint64_t>( src_value.read_i32(0) );
+ if(0)
+ case RawType::U64:
+ dst_val = static_cast<uint64_t>( src_value.read_u64(0) );
+ if(0)
+ case RawType::I64:
+ dst_val = static_cast<uint64_t>( src_value.read_i64(0) );
+
+ switch(re.type.inner_type)
+ {
+ case RawType::USize:
+ new_val.write_usize(0, dst_val);
+ break;
+ case RawType::U8:
+ new_val.write_u8(0, static_cast<uint8_t>(dst_val));
+ break;
+ case RawType::U16:
+ new_val.write_u16(0, static_cast<uint16_t>(dst_val));
+ break;
+ case RawType::U32:
+ new_val.write_u32(0, static_cast<uint32_t>(dst_val));
+ break;
+ case RawType::U64:
+ new_val.write_u64(0, dst_val);
+ break;
+ case RawType::ISize:
+ new_val.write_usize(0, static_cast<int64_t>(dst_val));
+ break;
+ case RawType::I8:
+ new_val.write_i8(0, static_cast<int8_t>(dst_val));
+ break;
+ case RawType::I16:
+ new_val.write_i16(0, static_cast<int16_t>(dst_val));
+ break;
+ case RawType::I32:
+ new_val.write_i32(0, static_cast<int32_t>(dst_val));
+ break;
+ case RawType::I64:
+ new_val.write_i64(0, static_cast<int64_t>(dst_val));
+ break;
+ default:
+ throw "";
+ }
+ break;
+ case RawType::U128: throw "TODO"; /*dst_val = src_value.read_u128();*/ break;
+ case RawType::I128: throw "TODO"; /*dst_val = src_value.read_i128();*/ break;
+ }
+ } break;
+ case RawType::U128:
+ case RawType::I128:
+ LOG_TODO("Cast to " << re.type);
+ }
+ }
+ } break;
+ TU_ARM(se.src, BinOp, re) {
+ ::HIR::TypeRef ty_l, ty_r;
+ Value tmp_l, tmp_r;
+ auto v_l = state.get_value_ref_param(re.val_l, tmp_l, ty_l);
+ auto v_r = state.get_value_ref_param(re.val_r, tmp_r, ty_r);
+ LOG_DEBUG(v_l << " (" << ty_l <<") ? " << v_r << " (" << ty_r <<")");
+
+ switch(re.op)
+ {
+ case ::MIR::eBinOp::EQ:
+ case ::MIR::eBinOp::NE:
+ case ::MIR::eBinOp::GT:
+ case ::MIR::eBinOp::GE:
+ case ::MIR::eBinOp::LT:
+ case ::MIR::eBinOp::LE: {
+ LOG_ASSERT(ty_l == ty_r, "BinOp type mismatch - " << ty_l << " != " << ty_r);
+ int res = 0;
+ // TODO: Handle comparison of the relocations too
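+ // res: three-way comparison result (-1 less, 0 equal, +1 greater); relocation identity is checked before the raw bytes.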
+
+ const auto& alloc_l = v_l.m_value ? v_l.m_value->allocation : v_l.m_alloc;
+ const auto& alloc_r = v_r.m_value ? v_r.m_value->allocation : v_r.m_alloc;
+ auto reloc_l = alloc_l ? v_l.get_relocation(v_l.m_offset) : RelocationPtr();
+ auto reloc_r = alloc_r ? v_r.get_relocation(v_r.m_offset) : RelocationPtr();
+
+ if( reloc_l != reloc_r )
+ {
+ res = (reloc_l < reloc_r ? -1 : 1);
+ }
+ LOG_DEBUG("res=" << res << ", " << reloc_l << " ? " << reloc_r);
+
+ if( const auto* w = ty_l.get_wrapper() )
+ {
+ if( w->type == TypeWrapper::Ty::Pointer )
+ {
+ // TODO: Technically only EQ/NE are valid.
+
+ res = res != 0 ? res : Ops::do_compare(v_l.read_usize(0), v_r.read_usize(0));
+
+ // Compare fat metadata.
+ if( res == 0 && v_l.m_size > POINTER_SIZE )
+ {
+ reloc_l = v_l.get_relocation(POINTER_SIZE);
+ reloc_r = v_r.get_relocation(POINTER_SIZE);
+
+ if( res == 0 && reloc_l != reloc_r )
+ {
+ res = (reloc_l < reloc_r ? -1 : 1);
+ }
+ res = res != 0 ? res : Ops::do_compare(v_l.read_usize(POINTER_SIZE), v_r.read_usize(POINTER_SIZE));
+ }
+ }
+ else
+ {
+ LOG_TODO("BinOp comparisons - " << se.src << " w/ " << ty_l);
+ }
+ }
+ else
+ {
+ switch(ty_l.inner_type)
+ {
+ case RawType::U64: res = res != 0 ? res : Ops::do_compare(v_l.read_u64(0), v_r.read_u64(0)); break;
+ case RawType::U32: res = res != 0 ? res : Ops::do_compare(v_l.read_u32(0), v_r.read_u32(0)); break;
+ case RawType::U16: res = res != 0 ? res : Ops::do_compare(v_l.read_u16(0), v_r.read_u16(0)); break;
+ case RawType::U8 : res = res != 0 ? res : Ops::do_compare(v_l.read_u8 (0), v_r.read_u8 (0)); break;
+ case RawType::I64: res = res != 0 ? res : Ops::do_compare(v_l.read_i64(0), v_r.read_i64(0)); break;
+ case RawType::I32: res = res != 0 ? res : Ops::do_compare(v_l.read_i32(0), v_r.read_i32(0)); break;
+ case RawType::I16: res = res != 0 ? res : Ops::do_compare(v_l.read_i16(0), v_r.read_i16(0)); break;
+ case RawType::I8 : res = res != 0 ? res : Ops::do_compare(v_l.read_i8 (0), v_r.read_i8 (0)); break;
+ case RawType::USize: res = res != 0 ? res : Ops::do_compare(v_l.read_usize(0), v_r.read_usize(0)); break;
+ case RawType::ISize: res = res != 0 ? res : Ops::do_compare(v_l.read_isize(0), v_r.read_isize(0)); break;
+ default:
+ LOG_TODO("BinOp comparisons - " << se.src << " w/ " << ty_l);
+ }
+ }
+ bool res_bool;
+ switch(re.op)
+ {
+ case ::MIR::eBinOp::EQ: res_bool = (res == 0); break;
+ case ::MIR::eBinOp::NE: res_bool = (res != 0); break;
+ case ::MIR::eBinOp::GT: res_bool = (res == 1); break;
+ case ::MIR::eBinOp::GE: res_bool = (res == 1 || res == 0); break;
+ case ::MIR::eBinOp::LT: res_bool = (res == -1); break;
+ case ::MIR::eBinOp::LE: res_bool = (res == -1 || res == 0); break;
+ default:
+ LOG_BUG("Unknown comparison");
+ }
+ new_val = Value(::HIR::TypeRef(RawType::Bool));
+ new_val.write_u8(0, res_bool ? 1 : 0);
+ } break;
+ case ::MIR::eBinOp::BIT_SHL:
+ case ::MIR::eBinOp::BIT_SHR: {
+ LOG_ASSERT(ty_l.get_wrapper() == nullptr, "Bitwise operator on non-primitive - " << ty_l);
+ LOG_ASSERT(ty_r.get_wrapper() == nullptr, "Bitwise operator with non-primitive - " << ty_r);
+ size_t max_bits = ty_r.get_size() * 8;
+ uint8_t shift;
+ auto check_cast = [&](auto v){ LOG_ASSERT(0 <= v && v <= max_bits, "Shift out of range - " << v); return static_cast<uint8_t>(v); };
+ switch(ty_r.inner_type)
+ {
+ case RawType::U64: shift = check_cast(v_r.read_u64(0)); break;
+ case RawType::U32: shift = check_cast(v_r.read_u32(0)); break;
+ case RawType::U16: shift = check_cast(v_r.read_u16(0)); break;
+ case RawType::U8 : shift = check_cast(v_r.read_u8 (0)); break;
+ case RawType::I64: shift = check_cast(v_r.read_i64(0)); break;
+ case RawType::I32: shift = check_cast(v_r.read_i32(0)); break;
+ case RawType::I16: shift = check_cast(v_r.read_i16(0)); break;
+ case RawType::I8 : shift = check_cast(v_r.read_i8 (0)); break;
+ case RawType::USize: shift = check_cast(v_r.read_usize(0)); break;
+ case RawType::ISize: shift = check_cast(v_r.read_isize(0)); break;
+ default:
+ LOG_TODO("BinOp shift rhs unknown type - " << se.src << " w/ " << ty_r);
+ }
+ new_val = Value(ty_l);
+ switch(ty_l.inner_type)
+ {
+ // TODO: U128
+ case RawType::U64: new_val.write_u64(0, Ops::do_bitwise(v_l.read_u64(0), static_cast<uint64_t>(shift), re.op)); break;
+ case RawType::U32: new_val.write_u32(0, Ops::do_bitwise(v_l.read_u32(0), static_cast<uint32_t>(shift), re.op)); break;
+ case RawType::U16: new_val.write_u16(0, Ops::do_bitwise(v_l.read_u16(0), static_cast<uint16_t>(shift), re.op)); break;
+ case RawType::U8 : new_val.write_u8 (0, Ops::do_bitwise(v_l.read_u8 (0), static_cast<uint8_t >(shift), re.op)); break;
+ case RawType::USize: new_val.write_usize(0, Ops::do_bitwise(v_l.read_usize(0), static_cast<uint64_t>(shift), re.op)); break;
+ // TODO: Is signed allowed?
+ default:
+ LOG_TODO("BinOp shift rhs unknown type - " << se.src << " w/ " << ty_r);
+ }
+ } break;
+ case ::MIR::eBinOp::BIT_AND:
+ case ::MIR::eBinOp::BIT_OR:
+ case ::MIR::eBinOp::BIT_XOR:
+ LOG_ASSERT(ty_l == ty_r, "BinOp type mismatch - " << ty_l << " != " << ty_r);
+ LOG_ASSERT(ty_l.get_wrapper() == nullptr, "Bitwise operator on non-primitive - " << ty_l);
+ new_val = Value(ty_l);
+ switch(ty_l.inner_type)
+ {
+ // TODO: U128/I128
+ case RawType::U64:
+ case RawType::I64:
+ new_val.write_u64( 0, Ops::do_bitwise(v_l.read_u64(0), v_r.read_u64(0), re.op) );
+ break;
+ case RawType::U32:
+ case RawType::I32:
+ new_val.write_u32( 0, static_cast<uint32_t>(Ops::do_bitwise(v_l.read_u32(0), v_r.read_u32(0), re.op)) );
+ break;
+ case RawType::U16:
+ case RawType::I16:
+ new_val.write_u16( 0, static_cast<uint16_t>(Ops::do_bitwise(v_l.read_u16(0), v_r.read_u16(0), re.op)) );
+ break;
+ case RawType::U8:
+ case RawType::I8:
+ new_val.write_u8 ( 0, static_cast<uint8_t >(Ops::do_bitwise(v_l.read_u8 (0), v_r.read_u8 (0), re.op)) );
+ break;
+ case RawType::USize:
+ case RawType::ISize:
+ new_val.write_usize( 0, Ops::do_bitwise(v_l.read_usize(0), v_r.read_usize(0), re.op) );
+ break;
+ default:
+ LOG_TODO("BinOp bitwise - " << se.src << " w/ " << ty_l);
+ }
+
+ break;
+ default:
+ LOG_ASSERT(ty_l == ty_r, "BinOp type mismatch - " << ty_l << " != " << ty_r);
+ auto val_l = PrimitiveValueVirt::from_value(ty_l, v_l);
+ auto val_r = PrimitiveValueVirt::from_value(ty_r, v_r);
+ switch(re.op)
+ {
+ case ::MIR::eBinOp::ADD: val_l.get().add( val_r.get() ); break;
+ case ::MIR::eBinOp::SUB: val_l.get().subtract( val_r.get() ); break;
+ case ::MIR::eBinOp::MUL: val_l.get().multiply( val_r.get() ); break;
+ case ::MIR::eBinOp::DIV: val_l.get().divide( val_r.get() ); break;
+ case ::MIR::eBinOp::MOD: val_l.get().modulo( val_r.get() ); break;
+
+ default:
+ LOG_TODO("Unsupported binary operator?");
+ }
+ new_val = Value(ty_l);
+ val_l.get().write_to_value(new_val, 0);
+ break;
+ }
+ } break;
+ TU_ARM(se.src, UniOp, re) {
+ ::HIR::TypeRef ty;
+ auto v = state.get_value_and_type(re.val, ty);
+ LOG_ASSERT(ty.get_wrapper() == nullptr, "UniOp on wrapped type - " << ty);
+ new_val = Value(ty);
+ switch(re.op)
+ {
+ case ::MIR::eUniOp::INV:
+ switch(ty.inner_type)
+ {
+ case RawType::U128:
+ case RawType::I128:
+ LOG_TODO("UniOp::INV U128");
+ case RawType::U64:
+ case RawType::I64:
+ new_val.write_u64( 0, ~v.read_u64(0) );
+ break;
+ case RawType::U32:
+ case RawType::I32:
+ new_val.write_u32( 0, ~v.read_u32(0) );
+ break;
+ case RawType::U16:
+ case RawType::I16:
+ new_val.write_u16( 0, ~v.read_u16(0) );
+ break;
+ case RawType::U8:
+ case RawType::I8:
+ new_val.write_u8 ( 0, ~v.read_u8 (0) );
+ break;
+ case RawType::USize:
+ case RawType::ISize:
+ new_val.write_usize( 0, ~v.read_usize(0) );
+ break;
+ case RawType::Bool:
+ new_val.write_u8 ( 0, v.read_u8 (0) == 0 );
+ break;
+ default:
+ LOG_TODO("UniOp::INV - w/ type " << ty);
+ }
+ break;
+ case ::MIR::eUniOp::NEG:
+ switch(ty.inner_type)
+ {
+ case RawType::I128:
+ LOG_TODO("UniOp::NEG I128");
+ case RawType::I64:
+ new_val.write_i64( 0, -v.read_i64(0) );
+ break;
+ case RawType::I32:
+ new_val.write_i32( 0, -v.read_i32(0) );
+ break;
+ case RawType::I16:
+ new_val.write_i16( 0, -v.read_i16(0) );
+ break;
+ case RawType::I8:
+ new_val.write_i8 ( 0, -v.read_i8 (0) );
+ break;
+ case RawType::ISize:
+ new_val.write_isize( 0, -v.read_isize(0) );
+ break;
+ default:
+ LOG_ERROR("UniOp::INV not valid on type " << ty);
+ }
+ break;
+ }
+ } break;
+ TU_ARM(se.src, DstMeta, re) {
+ auto ptr = state.get_value_ref(re.val);
+
+ ::HIR::TypeRef dst_ty;
+ state.get_value_and_type(se.dst, dst_ty);
+ new_val = ptr.read_value(POINTER_SIZE, dst_ty.get_size());
+ } break;
+ TU_ARM(se.src, DstPtr, re) {
+ auto ptr = state.get_value_ref(re.val);
+
+ new_val = ptr.read_value(0, POINTER_SIZE);
+ } break;
+ TU_ARM(se.src, MakeDst, re) {
+ // - Get target type, just for some assertions
+ ::HIR::TypeRef dst_ty;
+ state.get_value_and_type(se.dst, dst_ty);
+ new_val = Value(dst_ty);
+
+ auto ptr = state.param_to_value(re.ptr_val );
+ auto meta = state.param_to_value(re.meta_val);
+ LOG_DEBUG("ty=" << dst_ty << ", ptr=" << ptr << ", meta=" << meta);
+
+ new_val.write_value(0, ::std::move(ptr));
+ new_val.write_value(POINTER_SIZE, ::std::move(meta));
+ } break;
+ TU_ARM(se.src, Tuple, re) {
+ ::HIR::TypeRef dst_ty;
+ state.get_value_and_type(se.dst, dst_ty);
+ new_val = Value(dst_ty);
+
+ for(size_t i = 0; i < re.vals.size(); i++)
+ {
+ auto fld_ofs = dst_ty.composite_type->fields.at(i).first;
+ new_val.write_value(fld_ofs, state.param_to_value(re.vals[i]));
+ }
+ } break;
+ TU_ARM(se.src, Array, re) {
+ ::HIR::TypeRef dst_ty;
+ state.get_value_and_type(se.dst, dst_ty);
+ new_val = Value(dst_ty);
+ // TODO: Assert that type is an array
+ auto inner_ty = dst_ty.get_inner();
+ size_t stride = inner_ty.get_size();
+
+ size_t ofs = 0;
+ for(const auto& v : re.vals)
+ {
+ new_val.write_value(ofs, state.param_to_value(v));
+ ofs += stride;
+ }
+ } break;
+ TU_ARM(se.src, SizedArray, re) {
+ ::HIR::TypeRef dst_ty;
+ state.get_value_and_type(se.dst, dst_ty);
+ new_val = Value(dst_ty);
+ // TODO: Assert that type is an array
+ auto inner_ty = dst_ty.get_inner();
+ size_t stride = inner_ty.get_size();
+
+ size_t ofs = 0;
+ for(size_t i = 0; i < re.count; i++)
+ {
+ new_val.write_value(ofs, state.param_to_value(re.val));
+ ofs += stride;
+ }
+ } break;
+ TU_ARM(se.src, Variant, re) {
+ // 1. Get the composite by path.
+ const auto& data_ty = this->m_modtree.get_composite(re.path);
+ auto dst_ty = ::HIR::TypeRef(&data_ty);
+ new_val = Value(dst_ty);
+ // Three cases:
+ // - Unions (no tag)
+ // - Data enums (tag and data)
+ // - Value enums (no data)
+ const auto& var = data_ty.variants.at(re.index);
+ if( var.data_field != SIZE_MAX )
+ {
+ const auto& fld = data_ty.fields.at(re.index);
+
+ new_val.write_value(fld.first, state.param_to_value(re.val));
+ }
+ if( var.base_field != SIZE_MAX )
+ {
+ ::HIR::TypeRef tag_ty;
+ size_t tag_ofs = dst_ty.get_field_ofs(var.base_field, var.field_path, tag_ty);
+ LOG_ASSERT(tag_ty.get_size() == var.tag_data.size(), "");
+ new_val.write_bytes(tag_ofs, var.tag_data.data(), var.tag_data.size());
+ }
+ else
+ {
+ // Union, no tag
+ }
+ LOG_DEBUG("Variant " << new_val);
+ } break;
+ TU_ARM(se.src, Struct, re) {
+ const auto& data_ty = m_modtree.get_composite(re.path);
+
+ ::HIR::TypeRef dst_ty;
+ state.get_value_and_type(se.dst, dst_ty);
+ new_val = Value(dst_ty);
+ LOG_ASSERT(dst_ty.composite_type == &data_ty, "Destination type of RValue::Struct isn't the same as the input");
+
+ for(size_t i = 0; i < re.vals.size(); i++)
+ {
+ auto fld_ofs = data_ty.fields.at(i).first;
+ new_val.write_value(fld_ofs, state.param_to_value(re.vals[i]));
+ }
+ } break;
+ }
+ LOG_DEBUG("- new_val=" << new_val);
+ state.write_lvalue(se.dst, ::std::move(new_val));
+ } break;
+ case ::MIR::Statement::TAG_Asm:
+ LOG_TODO(stmt);
+ break;
+ TU_ARM(stmt, Drop, se) {
+ if( se.flag_idx == ~0u || cur_frame.drop_flags.at(se.flag_idx) )
+ {
+ ::HIR::TypeRef ty;
+ auto v = state.get_value_and_type(se.slot, ty);
+
+ // - Take a pointer to the inner
+ auto alloc = v.m_alloc;
+ if( !alloc )
+ {
+ if( !v.m_value->allocation )
+ {
+ v.m_value->create_allocation();
+ }
+ alloc = RelocationPtr::new_alloc( v.m_value->allocation );
+ }
+ size_t ofs = v.m_offset;
+ assert(ty.get_meta_type() == RawType::Unreachable);
+
+ auto ptr_ty = ty.wrapped(TypeWrapper::Ty::Borrow, 2);
+
+ auto ptr_val = Value::new_pointer(ptr_ty, ofs, ::std::move(alloc));
+
+ if( !drop_value(ptr_val, ty, /*shallow=*/se.kind == ::MIR::eDropKind::SHALLOW) )
+ {
+ return false;
+ }
+ }
+ } break;
+ TU_ARM(stmt, SetDropFlag, se) {
+ bool val = (se.other == ~0u ? false : cur_frame.drop_flags.at(se.other)) != se.new_val;
+ LOG_DEBUG("- " << val);
+ cur_frame.drop_flags.at(se.idx) = val;
+ } break;
+ case ::MIR::Statement::TAG_ScopeEnd:
+ LOG_TODO(stmt);
+ break;
+ }
+
+ cur_frame.stmt_idx += 1;
+ }
+ else
+ {
+ LOG_DEBUG("=== BB" << cur_frame.bb_idx << "/TERM: " << bb.terminator);
+ switch(bb.terminator.tag())
+ {
+ case ::MIR::Terminator::TAGDEAD: throw "";
+ TU_ARM(bb.terminator, Incomplete, _te)
+ LOG_TODO("Terminator::Incomplete hit");
+ TU_ARM(bb.terminator, Diverge, _te)
+ LOG_TODO("Terminator::Diverge hit");
+ TU_ARM(bb.terminator, Panic, _te)
+ LOG_TODO("Terminator::Panic");
+ TU_ARM(bb.terminator, Goto, te)
+ cur_frame.bb_idx = te;
+ break;
+ TU_ARM(bb.terminator, Return, _te)
+ LOG_DEBUG("RETURN " << cur_frame.ret);
+ return this->pop_stack(out_thread_result);
+ TU_ARM(bb.terminator, If, te) {
+ uint8_t v = state.get_value_ref(te.cond).read_u8(0);
+ LOG_ASSERT(v == 0 || v == 1, "");
+ cur_frame.bb_idx = v ? te.bb0 : te.bb1;
+ } break;
+ TU_ARM(bb.terminator, Switch, te) {
+ ::HIR::TypeRef ty;
+ auto v = state.get_value_and_type(te.val, ty);
+ LOG_ASSERT(ty.get_wrapper() == nullptr, "Matching on wrapped value - " << ty);
+ LOG_ASSERT(ty.inner_type == RawType::Composite, "Matching on non-composite - " << ty);
+
+ // TODO: Convert the variant list into something that makes it easier to switch on.
+ size_t found_target = SIZE_MAX;
+ size_t default_target = SIZE_MAX;
+ for(size_t i = 0; i < ty.composite_type->variants.size(); i ++)
+ {
+ const auto& var = ty.composite_type->variants[i];
+ if( var.tag_data.size() == 0 )
+ {
+ // Save as the default, error for multiple defaults
+ if( default_target != SIZE_MAX )
+ {
+ LOG_FATAL("Two variants with no tag in Switch - " << ty);
+ }
+ default_target = i;
+ }
+ else
+ {
+ // Get offset, read the value.
+ ::HIR::TypeRef tag_ty;
+ size_t tag_ofs = ty.get_field_ofs(var.base_field, var.field_path, tag_ty);
+ // Read the value bytes
+ ::std::vector<char> tmp( var.tag_data.size() );
+ v.read_bytes(tag_ofs, const_cast<char*>(tmp.data()), tmp.size());
+ if( v.get_relocation(tag_ofs) )
+ continue ;
+ if( ::std::memcmp(tmp.data(), var.tag_data.data(), tmp.size()) == 0 )
+ {
+ found_target = i;
+ break ;
+ }
+ }
+ }
+
+ if( found_target == SIZE_MAX )
+ {
+ found_target = default_target;
+ }
+ if( found_target == SIZE_MAX )
+ {
+ LOG_FATAL("Terminator::Switch on " << ty << " didn't find a variant");
+ }
+ cur_frame.bb_idx = te.targets.at(found_target);
+ } break;
+ TU_ARM(bb.terminator, SwitchValue, _te)
+ LOG_TODO("Terminator::SwitchValue");
+ TU_ARM(bb.terminator, Call, te) {
+ ::std::vector<Value> sub_args; sub_args.reserve(te.args.size());
+ for(const auto& a : te.args)
+ {
+ sub_args.push_back( state.param_to_value(a) );
+ LOG_DEBUG("#" << (sub_args.size() - 1) << " " << sub_args.back());
+ }
+ Value rv;
+ if( te.fcn.is_Intrinsic() )
+ {
+ const auto& fe = te.fcn.as_Intrinsic();
+ if( !this->call_intrinsic(rv, fe.name, fe.params, ::std::move(sub_args)) )
+ {
+ // Early return, don't want to update stmt_idx yet
+ return false;
+ }
+ }
+ else
+ {
+ RelocationPtr fcn_alloc_ptr;
+ const ::HIR::Path* fcn_p;
+ if( te.fcn.is_Path() ) {
+ fcn_p = &te.fcn.as_Path();
+ }
+ else {
+ ::HIR::TypeRef ty;
+ auto v = state.get_value_and_type(te.fcn.as_Value(), ty);
+ LOG_DEBUG("> Indirect call " << v);
+ // TODO: Assert type
+ // TODO: Assert offset/content.
+ assert(v.read_usize(0) == 0);
+ fcn_alloc_ptr = v.get_relocation(v.m_offset);
+ if( !fcn_alloc_ptr )
+ LOG_FATAL("Calling value with no relocation - " << v);
+ LOG_ASSERT(fcn_alloc_ptr.get_ty() == RelocationPtr::Ty::Function, "Calling value that isn't a function pointer");
+ fcn_p = &fcn_alloc_ptr.fcn();
+ }
+
+ LOG_DEBUG("Call " << *fcn_p);
+ if( !this->call_path(rv, *fcn_p, ::std::move(sub_args)) )
+ {
+ // Early return, don't want to update stmt_idx yet
+ return false;
+ }
+ }
+ LOG_DEBUG(te.ret_val << " = " << rv << " (resume " << cur_frame.fcn.my_path << ")");
+ state.write_lvalue(te.ret_val, rv);
+ cur_frame.bb_idx = te.ret_block;
+ } break;
+ }
+ cur_frame.stmt_idx = 0;
+ }
+
+ return false;
+}
+bool InterpreterThread::pop_stack(Value& out_thread_result)
+{
+ assert( !this->m_stack.empty() );
+
+ auto res_v = ::std::move(this->m_stack.back().ret);
+ this->m_stack.pop_back();
+
+ if( this->m_stack.empty() )
+ {
+ LOG_DEBUG("Thread complete, result " << res_v);
+ out_thread_result = ::std::move(res_v);
+ return true;
+ }
+ else
+ {
+ // Handle callback wrappers (e.g. for __rust_maybe_catch_panic, drop_value)
+ if( this->m_stack.back().cb )
+ {
+ if( !this->m_stack.back().cb(res_v, ::std::move(res_v)) )
+ {
+ return false;
+ }
+ this->m_stack.pop_back();
+ assert( !this->m_stack.empty() );
+ assert( !this->m_stack.back().cb );
+ }
+
+ auto& cur_frame = this->m_stack.back();
+ MirHelpers state { *this, cur_frame };
+
+ const auto& blk = cur_frame.fcn.m_mir.blocks.at( cur_frame.bb_idx );
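+ // Resume either mid-block (the completed frame was drop glue invoked by a Drop statement) or at a Call terminator.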
+ if( cur_frame.stmt_idx < blk.statements.size() )
+ {
+ assert( blk.statements[cur_frame.stmt_idx].is_Drop() );
+ cur_frame.stmt_idx ++;
+ LOG_DEBUG("DROP complete (resume " << cur_frame.fcn.my_path << ")");
+ }
+ else
+ {
+ assert( blk.terminator.is_Call() );
+ const auto& te = blk.terminator.as_Call();
+
+ LOG_DEBUG(te.ret_val << " = " << res_v << " (resume " << cur_frame.fcn.my_path << ")");
+
+ state.write_lvalue(te.ret_val, res_v);
+ cur_frame.stmt_idx = 0;
+ cur_frame.bb_idx = te.ret_block;
+ }
+
+ return false;
+ }
+}
+
+InterpreterThread::StackFrame::StackFrame(const Function& fcn, ::std::vector<Value> args):
+ fcn(fcn),
+ ret( fcn.ret_ty ),
+ args( ::std::move(args) ),
+ locals( ),
+ drop_flags( fcn.m_mir.drop_flags ),
+ bb_idx(0),
+ stmt_idx(0)
+{
+ this->locals.reserve( fcn.m_mir.locals.size() );
+ for(const auto& ty : fcn.m_mir.locals)
+ {
+ if( ty == RawType::Unreachable ) {
+ // HACK: Locals can be !, but they can NEVER be accessed
+ this->locals.push_back( Value() );
+ }
+ else {
+ this->locals.push_back( Value(ty) );
+ }
+ }
+}
+bool InterpreterThread::call_path(Value& ret, const ::HIR::Path& path, ::std::vector<Value> args)
+{
+ // TODO: Support overriding certain functions
+ {
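+ // Hard-coded overrides for a few libstd internals that the interpreter stubs out rather than executing.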
+ if( path == ::HIR::SimplePath { "std", { "sys", "imp", "c", "SetThreadStackGuarantee" } } )
+ {
+ ret = Value::new_i32(120); //ERROR_CALL_NOT_IMPLEMENTED
+ return true;
+ }
+
+ // - No guard page needed
+ if( path == ::HIR::SimplePath { "std", {"sys", "imp", "thread", "guard", "init" } } )
+ {
+ ret = Value::with_size(16, false);
+ ret.write_u64(0, 0);
+ ret.write_u64(8, 0);
+ return true;
+ }
+
+ // - No stack overflow handling needed
+ if( path == ::HIR::SimplePath { "std", { "sys", "imp", "stack_overflow", "imp", "init" } } )
+ {
+ return true;
+ }
+ }
+
+ const auto& fcn = m_modtree.get_function(path);
+
+ if( fcn.external.link_name != "" )
+ {
+ // External function!
+ return this->call_extern(ret, fcn.external.link_name, fcn.external.link_abi, ::std::move(args));
+ }
+
+ this->m_stack.push_back(StackFrame(fcn, ::std::move(args)));
+ return false;
+}
+
+extern "C" {
+ long sysconf(int);
+ ssize_t write(int, const void*, size_t);
+}
+bool InterpreterThread::call_extern(Value& rv, const ::std::string& link_name, const ::std::string& abi, ::std::vector<Value> args)
+{
+ if( link_name == "__rust_allocate" )
+ {
+ auto size = args.at(0).read_usize(0);
+ auto align = args.at(1).read_usize(0);
+ LOG_DEBUG("__rust_allocate(size=" << size << ", align=" << align << ")");
+ auto rty = ::HIR::TypeRef(RawType::Unit).wrap( TypeWrapper::Ty::Pointer, 0 );
+
+ // TODO: Use the alignment when making an allocation?
+ rv = Value::new_pointer(rty, 0, RelocationPtr::new_alloc(Allocation::new_alloc(size)));
+ }
+ else if( link_name == "__rust_reallocate" )
+ {
+ LOG_ASSERT(args.at(0).allocation, "__rust_reallocate first argument doesn't have an allocation");
+ auto alloc_ptr = args.at(0).get_relocation(0);
+ auto ptr_ofs = args.at(0).read_usize(0);
+ LOG_ASSERT(ptr_ofs == 0, "__rust_reallocate with offset pointer");
+ auto oldsize = args.at(1).read_usize(0);
+ auto newsize = args.at(2).read_usize(0);
+ auto align = args.at(3).read_usize(0);
+ LOG_DEBUG("__rust_reallocate(ptr=" << alloc_ptr << ", oldsize=" << oldsize << ", newsize=" << newsize << ", align=" << align << ")");
+
+ LOG_ASSERT(alloc_ptr, "__rust_reallocate with no backing allocation attached to pointer");
+ LOG_ASSERT(alloc_ptr.is_alloc(), "__rust_reallocate with no backing allocation attached to pointer");
+ auto& alloc = alloc_ptr.alloc();
+ // TODO: Check old size and alignment against allocation.
+ alloc.data.resize( (newsize + 8-1) / 8 );
+ alloc.mask.resize( (newsize + 8-1) / 8 );
+ // TODO: Should this instead make a new allocation to catch use-after-free?
+ rv = ::std::move(args.at(0));
+ }
+ else if( link_name == "__rust_deallocate" )
+ {
+ LOG_ASSERT(args.at(0).allocation, "__rust_deallocate first argument doesn't have an allocation");
+ auto alloc_ptr = args.at(0).get_relocation(0);
+ auto ptr_ofs = args.at(0).read_usize(0);
+ LOG_ASSERT(ptr_ofs == 0, "__rust_deallocate with offset pointer");
+ LOG_DEBUG("__rust_deallocate(ptr=" << alloc_ptr << ")");
+
+ LOG_ASSERT(alloc_ptr, "__rust_deallocate with no backing allocation attached to pointer");
+ LOG_ASSERT(alloc_ptr.is_alloc(), "__rust_deallocate with no backing allocation attached to pointer");
+ auto& alloc = alloc_ptr.alloc();
+ alloc.mark_as_freed();
+ // Just let it drop.
+ rv = Value();
+ }
+ else if( link_name == "__rust_maybe_catch_panic" )
+ {
+ auto fcn_path = args.at(0).get_relocation(0).fcn();
+ auto arg = args.at(1);
+ auto data_ptr = args.at(2).read_pointer_valref_mut(0, POINTER_SIZE);
+ auto vtable_ptr = args.at(3).read_pointer_valref_mut(0, POINTER_SIZE);
+
+ ::std::vector<Value> sub_args;
+ sub_args.push_back( ::std::move(arg) );
+
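+ // Push a wrapper frame: when the callee returns normally, report 0 (no panic) to the caller.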
+ this->m_stack.push_back(StackFrame::make_wrapper([=](Value& out_rv, Value /*rv*/)->bool{
+ out_rv = Value::new_u32(0);
+ return true;
+ }));
+
+ // TODO: Catch the panic out of this.
+ if( this->call_path(rv, fcn_path, ::std::move(sub_args)) )
+ {
+ bool v = this->pop_stack(rv);
+ assert( v == false );
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+ else if( link_name == "__rust_start_panic" )
+ {
+ LOG_TODO("__rust_start_panic");
+ }
+ else if( link_name == "rust_begin_unwind" )
+ {
+ LOG_TODO("rust_begin_unwind");
+ }
+#ifdef _WIN32
+ // WinAPI functions used by libstd
+ else if( link_name == "AddVectoredExceptionHandler" )
+ {
+ LOG_DEBUG("Call `AddVectoredExceptionHandler` - Ignoring and returning non-null");
+ rv = Value::new_usize(1);
+ }
+ else if( link_name == "GetModuleHandleW" )
+ {
+ LOG_ASSERT(args.at(0).allocation.is_alloc(), "");
+ const auto& tgt_alloc = args.at(0).allocation.alloc().get_relocation(0);
+ const void* arg0 = (tgt_alloc ? tgt_alloc.alloc().data_ptr() : nullptr);
+ //extern void* GetModuleHandleW(const void* s);
+ if(arg0) {
+ LOG_DEBUG("GetModuleHandleW(" << tgt_alloc.alloc() << ")");
+ }
+ else {
+ LOG_DEBUG("GetModuleHandleW(NULL)");
+ }
+
+ auto ret = GetModuleHandleW(static_cast<LPCWSTR>(arg0));
+ if(ret)
+ {
+ rv = Value::new_ffiptr(FFIPointer { "GetModuleHandleW", ret, 0 });
+ }
+ else
+ {
+ rv = Value(::HIR::TypeRef(RawType::USize));
+ rv.create_allocation();
+ rv.write_usize(0,0);
+ }
+ }
+ else if( link_name == "GetProcAddress" )
+ {
+ LOG_ASSERT(args.at(0).allocation.is_alloc(), "");
+ const auto& handle_alloc = args.at(0).allocation.alloc().get_relocation(0);
+ LOG_ASSERT(args.at(1).allocation.is_alloc(), "");
+ const auto& sym_alloc = args.at(1).allocation.alloc().get_relocation(0);
+
+ // TODO: Ensure that first arg is a FFI pointer with offset+size of zero
+ void* handle = handle_alloc.ffi().ptr_value;
+ // TODO: Get either a FFI data pointer, or a inner data pointer
+ const void* symname = sym_alloc.alloc().data_ptr();
+ // TODO: Sanity check that it's a valid c string within its allocation
+ LOG_DEBUG("FFI GetProcAddress(" << handle << ", \"" << static_cast<const char*>(symname) << "\")");
+
+ auto ret = GetProcAddress(static_cast<HMODULE>(handle), static_cast<LPCSTR>(symname));
+
+ if( ret )
+ {
+ rv = Value::new_ffiptr(FFIPointer { "GetProcAddress", ret, 0 });
+ }
+ else
+ {
+ rv = Value(::HIR::TypeRef(RawType::USize));
+ rv.create_allocation();
+ rv.write_usize(0,0);
+ }
+ }
+#else
+ // POSIX
+ else if( link_name == "write" )
+ {
+ auto fd = args.at(0).read_i32(0);
+ auto count = args.at(2).read_isize(0);
+ const auto* buf = args.at(1).read_pointer_const(0, count);
+
+ ssize_t val = write(fd, buf, count);
+
+ rv = Value::new_isize(val);
+ }
+ else if( link_name == "sysconf" )
+ {
+ auto name = args.at(0).read_i32(0);
+ LOG_DEBUG("FFI sysconf(" << name << ")");
+
+ long val = sysconf(name);
+
+ rv = Value::new_usize(val);
+ }
+ else if( link_name == "pthread_mutex_init" || link_name == "pthread_mutex_lock" || link_name == "pthread_mutex_unlock" || link_name == "pthread_mutex_destroy" )
+ {
+ rv = Value::new_i32(0);
+ }
+ else if( link_name == "pthread_rwlock_rdlock" )
+ {
+ rv = Value::new_i32(0);
+ }
+ else if( link_name == "pthread_mutexattr_init" || link_name == "pthread_mutexattr_settype" || link_name == "pthread_mutexattr_destroy" )
+ {
+ rv = Value::new_i32(0);
+ }
+ else if( link_name == "pthread_condattr_init" || link_name == "pthread_condattr_destroy" || link_name == "pthread_condattr_setclock" )
+ {
+ rv = Value::new_i32(0);
+ }
+ else if( link_name == "pthread_cond_init" || link_name == "pthread_cond_destroy" )
+ {
+ rv = Value::new_i32(0);
+ }
+ else if( link_name == "pthread_key_create" )
+ {
+ auto key_ref = args.at(0).read_pointer_valref_mut(0, 4);
+
+ auto key = ThreadState::s_next_tls_key ++;
+ key_ref.m_alloc.alloc().write_u32( key_ref.m_offset, key );
+
+ rv = Value::new_i32(0);
+ }
+ else if( link_name == "pthread_getspecific" )
+ {
+ auto key = args.at(0).read_u32(0);
+
+ // Get a pointer-sized value from storage
+ uint64_t v = key < m_thread.tls_values.size() ? m_thread.tls_values[key] : 0;
+
+ rv = Value::new_usize(v);
+ }
+ else if( link_name == "pthread_setspecific" )
+ {
+ auto key = args.at(0).read_u32(0);
+ auto v = args.at(1).read_u64(0);
+
+ // Get a pointer-sized value from storage
+ if( key >= m_thread.tls_values.size() ) {
+ m_thread.tls_values.resize(key+1);
+ }
+ m_thread.tls_values[key] = v;
+
+ rv = Value::new_i32(0);
+ }
+ else if( link_name == "pthread_key_delete" )
+ {
+ rv = Value::new_i32(0);
+ }
+#endif
+ // std C
+ else if( link_name == "signal" )
+ {
+ LOG_DEBUG("Call `signal` - Ignoring and returning SIG_IGN");
+ rv = Value(::HIR::TypeRef(RawType::USize));
+ rv.write_usize(0, 1);
+ }
+ // - `void *memchr(const void *s, int c, size_t n);`
+ else if( link_name == "memchr" )
+ {
+ auto ptr_alloc = args.at(0).get_relocation(0);
+ auto c = args.at(1).read_i32(0);
+ auto n = args.at(2).read_usize(0);
+ const void* ptr = args.at(0).read_pointer_const(0, n);
+
+ const void* ret = memchr(ptr, c, n);
+
+ rv = Value(::HIR::TypeRef(RawType::USize));
+ rv.create_allocation();
+ if( ret )
+ {
+ rv.write_usize(0, args.at(0).read_usize(0) + ( static_cast<const uint8_t*>(ret) - static_cast<const uint8_t*>(ptr) ));
+ rv.allocation->relocations.push_back({ 0, ptr_alloc });
+ }
+ else
+ {
+ rv.write_usize(0, 0);
+ }
+ }
+ else if( link_name == "memrchr" )
+ {
+ auto ptr_alloc = args.at(0).get_relocation(0);
+ auto c = args.at(1).read_i32(0);
+ auto n = args.at(2).read_usize(0);
+ const void* ptr = args.at(0).read_pointer_const(0, n);
+
+ const void* ret = memrchr(ptr, c, n);
+
+ rv = Value(::HIR::TypeRef(RawType::USize));
+ rv.create_allocation();
+ if( ret )
+ {
+ rv.write_usize(0, args.at(0).read_usize(0) + ( static_cast<const uint8_t*>(ret) - static_cast<const uint8_t*>(ptr) ));
+ rv.allocation->relocations.push_back({ 0, ptr_alloc });
+ }
+ else
+ {
+ rv.write_usize(0, 0);
+ }
+ }
+ else if( link_name == "strlen" )
+ {
+ // strlen - custom implementation to ensure validity
+ bool _is_mut;
+ size_t size;
+ const char* ptr = reinterpret_cast<const char*>( args.at(0).read_pointer_unsafe(0, 1, size, _is_mut) );
+ size_t len = 0;
+ while(size -- && *ptr)
+ {
+ ptr ++;
+ len ++;
+ }
+ args.at(0).read_pointer_const(0, len + 1);
+
+ //rv = Value::new_usize(len);
+ rv = Value(::HIR::TypeRef(RawType::USize));
+ rv.write_usize(0, len);
+ }
+ // Allocators!
+ else
+ {
+ LOG_TODO("Call external function " << link_name);
+ }
+ return true;
+}
+
+bool InterpreterThread::call_intrinsic(Value& rv, const ::std::string& name, const ::HIR::PathParams& ty_params, ::std::vector<Value> args)
+{
+ TRACE_FUNCTION_R(name, rv);
+ for(const auto& a : args)
+ LOG_DEBUG("#" << (&a - args.data()) << ": " << a);
+ if( name == "type_id" )
+ {
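+ // Each distinct type is assigned a stable index in a static list; that index serves as its TypeId value.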
+ const auto& ty_T = ty_params.tys.at(0);
+ static ::std::vector<HIR::TypeRef> type_ids;
+ auto it = ::std::find(type_ids.begin(), type_ids.end(), ty_T);
+ if( it == type_ids.end() )
+ {
+ it = type_ids.insert(it, ty_T);
+ }
+
+ rv = Value::with_size(POINTER_SIZE, false);
+ rv.write_usize(0, it - type_ids.begin());
+ }
+ else if( name == "atomic_fence" || name == "atomic_fence_acq" )
+ {
+ rv = Value();
+ }
+ else if( name == "atomic_store" )
+ {
+ auto& ptr_val = args.at(0);
+ auto& data_val = args.at(1);
+
+ LOG_ASSERT(ptr_val.size() == POINTER_SIZE, "atomic_store of a value that isn't a pointer-sized value");
+
+ // There MUST be a relocation at this point with a valid allocation.
+ auto alloc = ptr_val.get_relocation(0);
+ LOG_ASSERT(alloc, "Deref of a value with no relocation");
+
+ // TODO: Atomic side of this?
+ size_t ofs = ptr_val.read_usize(0);
+ alloc.alloc().write_value(ofs, ::std::move(data_val));
+ }
+ else if( name == "atomic_load" || name == "atomic_load_relaxed" )
+ {
+ auto& ptr_val = args.at(0);
+ LOG_ASSERT(ptr_val.size() == POINTER_SIZE, "atomic_load of a value that isn't a pointer-sized value");
+
+ // There MUST be a relocation at this point with a valid allocation.
+ auto alloc = ptr_val.get_relocation(0);
+ LOG_ASSERT(alloc, "Deref of a value with no relocation");
+ // TODO: Atomic lock the allocation.
+
+ size_t ofs = ptr_val.read_usize(0);
+ const auto& ty = ty_params.tys.at(0);
+
+ rv = alloc.alloc().read_value(ofs, ty.get_size());
+ }
+ else if( name == "atomic_xadd" || name == "atomic_xadd_relaxed" )
+ {
+ const auto& ty_T = ty_params.tys.at(0);
+ auto ptr_ofs = args.at(0).read_usize(0);
+ auto ptr_alloc = args.at(0).get_relocation(0);
+ auto v = args.at(1).read_value(0, ty_T.get_size());
+
+ // TODO: Atomic lock the allocation.
+ if( !ptr_alloc || !ptr_alloc.is_alloc() ) {
+ LOG_ERROR("atomic pointer has no allocation");
+ }
+
+ // - Result is the original value
+ rv = ptr_alloc.alloc().read_value(ptr_ofs, ty_T.get_size());
+
+ auto val_l = PrimitiveValueVirt::from_value(ty_T, rv);
+ const auto val_r = PrimitiveValueVirt::from_value(ty_T, v);
+ val_l.get().add( val_r.get() );
+
+ val_l.get().write_to_value( ptr_alloc.alloc(), ptr_ofs );
+ }
+ else if( name == "atomic_xsub" || name == "atomic_xsub_relaxed" || name == "atomic_xsub_rel" )
+ {
+ const auto& ty_T = ty_params.tys.at(0);
+ auto ptr_ofs = args.at(0).read_usize(0);
+ auto ptr_alloc = args.at(0).get_relocation(0);
+ auto v = args.at(1).read_value(0, ty_T.get_size());
+
+ // TODO: Atomic lock the allocation.
+ if( !ptr_alloc || !ptr_alloc.is_alloc() ) {
+ LOG_ERROR("atomic pointer has no allocation");
+ }
+
+ // - Result is the original value
+ rv = ptr_alloc.alloc().read_value(ptr_ofs, ty_T.get_size());
+
+ auto val_l = PrimitiveValueVirt::from_value(ty_T, rv);
+ const auto val_r = PrimitiveValueVirt::from_value(ty_T, v);
+ val_l.get().subtract( val_r.get() );
+
+ val_l.get().write_to_value( ptr_alloc.alloc(), ptr_ofs );
+ }
+ else if( name == "atomic_xchg" )
+ {
+ const auto& ty_T = ty_params.tys.at(0);
+ auto data_ref = args.at(0).read_pointer_valref_mut(0, ty_T.get_size());
+ const auto& new_v = args.at(1);
+
+ rv = data_ref.read_value(0, new_v.size());
+ data_ref.m_alloc.alloc().write_value( data_ref.m_offset, new_v );
+ }
+ else if( name == "atomic_cxchg" )
+ {
+ const auto& ty_T = ty_params.tys.at(0);
+ // TODO: Get a ValueRef to the target location
+ auto data_ref = args.at(0).read_pointer_valref_mut(0, ty_T.get_size());
+ const auto& old_v = args.at(1);
+ const auto& new_v = args.at(2);
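+ // The return value packs the previous value followed by a one-byte success flag.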
+ rv = Value::with_size( ty_T.get_size() + 1, false );
+ rv.write_value(0, data_ref.read_value(0, old_v.size()));
+ LOG_DEBUG("> *ptr = " << data_ref);
+ if( data_ref.compare(old_v.data_ptr(), old_v.size()) == true ) {
+ data_ref.m_alloc.alloc().write_value( data_ref.m_offset, new_v );
+ rv.write_u8( old_v.size(), 1 );
+ }
+ else {
+ rv.write_u8( old_v.size(), 0 );
+ }
+ }
+ else if( name == "transmute" )
+ {
+ // Transmute requires the same size, so just copying the value works
+ rv = ::std::move(args.at(0));
+ }
+ else if( name == "assume" )
+ {
+ // Assume is a no-op which returns unit
+ }
+ else if( name == "offset" )
+ {
+ auto ptr_alloc = args.at(0).get_relocation(0);
+ auto ptr_ofs = args.at(0).read_usize(0);
+ auto& ofs_val = args.at(1);
+
+ auto delta_counts = ofs_val.read_usize(0);
+ auto new_ofs = ptr_ofs + delta_counts * ty_params.tys.at(0).get_size();
+ if(POINTER_SIZE != 8) {
+ new_ofs &= 0xFFFFFFFF;
+ }
+
+ rv = ::std::move(args.at(0));
+ rv.write_usize(0, new_ofs);
+ if( ptr_alloc ) {
+ rv.allocation->relocations.push_back({ 0, ptr_alloc });
+ }
+ }
+ // effectively ptr::write
+ else if( name == "move_val_init" )
+ {
+ auto& ptr_val = args.at(0);
+ auto& data_val = args.at(1);
+
+ LOG_ASSERT(ptr_val.size() == POINTER_SIZE, "move_val_init of an address that isn't a pointer-sized value");
+
+ // There MUST be a relocation at this point with a valid allocation.
+ LOG_ASSERT(ptr_val.allocation, "Deref of a value with no allocation (hence no relocations)");
+ LOG_TRACE("Deref " << ptr_val << " and store " << data_val);
+
+ auto ptr_alloc = ptr_val.get_relocation(0);
+ LOG_ASSERT(ptr_alloc, "Deref of a value with no relocation");
+
+ size_t ofs = ptr_val.read_usize(0);
+ ptr_alloc.alloc().write_value(ofs, ::std::move(data_val));
+ LOG_DEBUG(ptr_alloc.alloc());
+ }
+ else if( name == "uninit" )
+ {
+ rv = Value(ty_params.tys.at(0));
+ }
+ else if( name == "init" )
+ {
+ rv = Value(ty_params.tys.at(0));
+ rv.mark_bytes_valid(0, rv.size());
+ }
+ // - Unsized stuff
+ else if( name == "size_of_val" )
+ {
+ auto& val = args.at(0);
+ const auto& ty = ty_params.tys.at(0);
+ rv = Value(::HIR::TypeRef(RawType::USize));
+ // Get unsized type somehow.
+ // - _HAS_ to be the last type, so that makes it easier
+ size_t fixed_size = 0;
+ if( const auto* ity = ty.get_unsized_type(fixed_size) )
+ {
+ const auto meta_ty = ty.get_meta_type();
+ LOG_DEBUG("size_of_val - " << ty << " ity=" << *ity << " meta_ty=" << meta_ty << " fixed_size=" << fixed_size);
+ size_t flex_size = 0;
+ if( const auto* w = ity->get_wrapper() )
+ {
+ LOG_ASSERT(w->type == TypeWrapper::Ty::Slice, "size_of_val on wrapped type that isn't a slice - " << *ity);
+ size_t item_size = ity->get_inner().get_size();
+ size_t item_count = val.read_usize(POINTER_SIZE);
+ flex_size = item_count * item_size;
+ LOG_DEBUG("> item_size=" << item_size << " item_count=" << item_count << " flex_size=" << flex_size);
+ }
+ else if( ity->inner_type == RawType::Str )
+ {
+ flex_size = val.read_usize(POINTER_SIZE);
+ }
+ else if( ity->inner_type == RawType::TraitObject )
+ {
+ LOG_TODO("size_of_val - Trait Object - " << ty);
+ }
+ else
+ {
+ LOG_BUG("Inner unsized type unknown - " << *ity);
+ }
+
+ rv.write_usize(0, fixed_size + flex_size);
+ }
+ else
+ {
+ rv.write_usize(0, ty.get_size());
+ }
+ }
+ else if( name == "drop_in_place" )
+ {
+ auto& val = args.at(0);
+ const auto& ty = ty_params.tys.at(0);
+ return drop_value(val, ty);
+ }
+ // ----------------------------------------------------------------
+ // Checked arithmetic
+ else if( name == "add_with_overflow" )
+ {
+ const auto& ty = ty_params.tys.at(0);
+
+ auto lhs = PrimitiveValueVirt::from_value(ty, args.at(0));
+ auto rhs = PrimitiveValueVirt::from_value(ty, args.at(1));
+ bool didnt_overflow = lhs.get().add( rhs.get() );
+
+ // Get return type - a tuple of `(T, bool,)`
+ ::HIR::GenericPath gp;
+ gp.m_params.tys.push_back(ty);
+ gp.m_params.tys.push_back(::HIR::TypeRef { RawType::Bool });
+ const auto& dty = m_modtree.get_composite(gp);
+
+ rv = Value(::HIR::TypeRef(&dty));
+ lhs.get().write_to_value(rv, dty.fields[0].first);
+ rv.write_u8( dty.fields[1].first, didnt_overflow ? 0 : 1 ); // Returns true if overflow happened
+ }
+ else if( name == "sub_with_overflow" )
+ {
+ const auto& ty = ty_params.tys.at(0);
+
+ auto lhs = PrimitiveValueVirt::from_value(ty, args.at(0));
+ auto rhs = PrimitiveValueVirt::from_value(ty, args.at(1));
+ bool didnt_overflow = lhs.get().subtract( rhs.get() );
+
+ // Get return type - a tuple of `(T, bool,)`
+ ::HIR::GenericPath gp;
+ gp.m_params.tys.push_back(ty);
+ gp.m_params.tys.push_back(::HIR::TypeRef { RawType::Bool });
+ const auto& dty = m_modtree.get_composite(gp);
+
+ rv = Value(::HIR::TypeRef(&dty));
+ lhs.get().write_to_value(rv, dty.fields[0].first);
+ rv.write_u8( dty.fields[1].first, didnt_overflow ? 0 : 1 ); // Returns true if overflow happened
+ }
+ else if( name == "mul_with_overflow" )
+ {
+ const auto& ty = ty_params.tys.at(0);
+
+ auto lhs = PrimitiveValueVirt::from_value(ty, args.at(0));
+ auto rhs = PrimitiveValueVirt::from_value(ty, args.at(1));
+ bool didnt_overflow = lhs.get().multiply( rhs.get() );
+
+ // Get return type - a tuple of `(T, bool,)`
+ ::HIR::GenericPath gp;
+ gp.m_params.tys.push_back(ty);
+ gp.m_params.tys.push_back(::HIR::TypeRef { RawType::Bool });
+ const auto& dty = m_modtree.get_composite(gp);
+
+ rv = Value(::HIR::TypeRef(&dty));
+ lhs.get().write_to_value(rv, dty.fields[0].first);
+ rv.write_u8( dty.fields[1].first, didnt_overflow ? 0 : 1 ); // Returns true if overflow happened
+ }
+ // Overflowing arithmetic
+ else if( name == "overflowing_sub" )
+ {
+ const auto& ty = ty_params.tys.at(0);
+
+ auto lhs = PrimitiveValueVirt::from_value(ty, args.at(0));
+ auto rhs = PrimitiveValueVirt::from_value(ty, args.at(1));
+ lhs.get().subtract( rhs.get() );
+
+ rv = Value(ty);
+ lhs.get().write_to_value(rv, 0);
+ }
+ // ----------------------------------------------------------------
+ // memcpy
+ else if( name == "copy_nonoverlapping" )
+ {
+ auto src_ofs = args.at(0).read_usize(0);
+ auto src_alloc = args.at(0).get_relocation(0);
+ auto dst_ofs = args.at(1).read_usize(0);
+ auto dst_alloc = args.at(1).get_relocation(0);
+ size_t ent_count = args.at(2).read_usize(0);
+ size_t ent_size = ty_params.tys.at(0).get_size();
+ auto byte_count = ent_count * ent_size;
+
+ LOG_ASSERT(src_alloc, "Source of copy* must have an allocation");
+ LOG_ASSERT(dst_alloc, "Destination of copy* must be a memory allocation");
+ LOG_ASSERT(dst_alloc.is_alloc(), "Destination of copy* must be a memory allocation");
+
+ switch(src_alloc.get_ty())
+ {
+ case RelocationPtr::Ty::Allocation: {
+ auto v = src_alloc.alloc().read_value(src_ofs, byte_count);
+ LOG_DEBUG("v = " << v);
+ dst_alloc.alloc().write_value(dst_ofs, ::std::move(v));
+ } break;
+ case RelocationPtr::Ty::StdString:
+ LOG_ASSERT(src_ofs <= src_alloc.str().size(), "");
+ LOG_ASSERT(byte_count <= src_alloc.str().size(), "");
+ LOG_ASSERT(src_ofs + byte_count <= src_alloc.str().size(), "");
+ dst_alloc.alloc().write_bytes(dst_ofs, src_alloc.str().data() + src_ofs, byte_count);
+ break;
+ case RelocationPtr::Ty::Function:
+ LOG_FATAL("Attempt to copy* a function");
+ break;
+ case RelocationPtr::Ty::FfiPointer:
+ LOG_ASSERT(src_ofs <= src_alloc.ffi().size, "");
+ LOG_ASSERT(byte_count <= src_alloc.ffi().size, "");
+ LOG_ASSERT(src_ofs + byte_count <= src_alloc.ffi().size, "");
+ dst_alloc.alloc().write_bytes(dst_ofs, src_alloc.ffi().ptr_value + src_ofs, byte_count);
+ break;
+ }
+ }
+ else
+ {
+ LOG_TODO("Call intrinsic \"" << name << "\"");
+ }
+ return true;
+}
+
+// TODO: Use a ValueRef instead?
+bool InterpreterThread::drop_value(Value ptr, const ::HIR::TypeRef& ty, bool is_shallow/*=false*/)
+{
+ // TODO: After the drop is done, flag the backing allocation for `ptr` as freed
+ if( is_shallow )
+ {
+ // HACK: Only works for Box<T> where the first pointer is the data pointer
+ auto box_ptr_vr = ptr.read_pointer_valref_mut(0, POINTER_SIZE);
+ auto ofs = box_ptr_vr.read_usize(0);
+ auto alloc = box_ptr_vr.get_relocation(0);
+ if( ofs != 0 || !alloc || !alloc.is_alloc() ) {
+ LOG_ERROR("Attempting to shallow drop with invalid pointer (no relocation or non-zero offset) - " << box_ptr_vr);
+ }
+
+ LOG_DEBUG("drop_value SHALLOW deallocate " << alloc);
+ alloc.alloc().mark_as_freed();
+ return true;
+ }
+ if( const auto* w = ty.get_wrapper() )
+ {
+ switch( w->type )
+ {
+ case TypeWrapper::Ty::Borrow:
+ if( w->size == static_cast<size_t>(::HIR::BorrowType::Move) )
+ {
+ LOG_TODO("Drop - " << ty << " - dereference and go to inner");
+ // TODO: Clear validity on the entire inner value.
+ //auto iptr = ptr.read_value(0, ty.get_size());
+ //drop_value(iptr, ty.get_inner());
+ }
+ else
+ {
+ // No destructor
+ }
+ break;
+ case TypeWrapper::Ty::Pointer:
+ // No destructor
+ break;
+ case TypeWrapper::Ty::Slice: {
+ // - Get thin pointer and count
+ auto ofs = ptr.read_usize(0);
+ auto ptr_reloc = ptr.get_relocation(0);
+ auto count = ptr.read_usize(POINTER_SIZE);
+
+ auto ity = ty.get_inner();
+ auto pty = ity.wrapped(TypeWrapper::Ty::Borrow, static_cast<size_t>(::HIR::BorrowType::Move));
+ for(uint64_t i = 0; i < count; i ++)
+ {
+ auto ptr = Value::new_pointer(pty, ofs, ptr_reloc);
+ if( !drop_value(ptr, ity) ) {
+ LOG_TODO("Handle closure looping when dropping a slice");
+ }
+ ofs += ity.get_size();
+ }
+ } break;
+ // TODO: Arrays?
+ default:
+ LOG_TODO("Drop - " << ty << " - array?");
+ break;
+ }
+ }
+ else
+ {
+ if( ty.inner_type == RawType::Composite )
+ {
+ if( ty.composite_type->drop_glue != ::HIR::Path() )
+ {
+ LOG_DEBUG("Drop - " << ty);
+
+ Value tmp;
+ return this->call_path(tmp, ty.composite_type->drop_glue, { ptr });
+ }
+ else
+ {
+ // No drop glue
+ }
+ }
+ else if( ty.inner_type == RawType::TraitObject )
+ {
+ LOG_TODO("Drop - " << ty << " - trait object");
+ }
+ else
+ {
+ // No destructor
+ }
+ }
+ return true;
+}
diff --git a/tools/standalone_miri/miri.hpp b/tools/standalone_miri/miri.hpp
new file mode 100644
index 00000000..09d92a9b
--- /dev/null
+++ b/tools/standalone_miri/miri.hpp
@@ -0,0 +1,79 @@
+
+#pragma once
+#include "module_tree.hpp"
+#include "value.hpp"
+
+struct ThreadState
+{
+ static unsigned s_next_tls_key;
+ unsigned call_stack_depth;
+ ::std::vector<uint64_t> tls_values;
+
+ ThreadState():
+ call_stack_depth(0)
+ {
+ }
+
+ struct DecOnDrop {
+ unsigned* p;
+ ~DecOnDrop() { (*p) --; }
+ };
+ DecOnDrop enter_function() {
+ this->call_stack_depth ++;
+ return DecOnDrop { &this->call_stack_depth };
+ }
+};
+
+class InterpreterThread
+{
+ friend struct MirHelpers;
+ struct StackFrame
+ {
+ ::std::function<bool(Value&,Value)> cb;
+ const Function& fcn;
+ Value ret;
+ ::std::vector<Value> args;
+ ::std::vector<Value> locals;
+ ::std::vector<bool> drop_flags;
+
+ unsigned bb_idx;
+ unsigned stmt_idx;
+
+ StackFrame(const Function& fcn, ::std::vector<Value> args);
+ static StackFrame make_wrapper(::std::function<bool(Value&,Value)> cb) {
+ static Function f;
+ StackFrame rv(f, {});
+ rv.cb = ::std::move(cb);
+ return rv;
+ }
+ };
+
+ ModuleTree& m_modtree;
+ ThreadState m_thread;
+ ::std::vector<StackFrame> m_stack;
+
+public:
+ InterpreterThread(ModuleTree& modtree):
+ m_modtree(modtree)
+ {
+ }
+ ~InterpreterThread();
+
+ void start(const ::HIR::Path& p, ::std::vector<Value> args);
+ // Returns `true` if the call stack empties
+ bool step_one(Value& out_thread_result);
+
+private:
+ bool pop_stack(Value& out_thread_result);
+
+ // Returns true if the call was resolved instantly
+ bool call_path(Value& ret_val, const ::HIR::Path& p, ::std::vector<Value> args);
+ // Returns true if the call was resolved instantly
+ bool call_extern(Value& ret_val, const ::std::string& name, const ::std::string& abi, ::std::vector<Value> args);
+ // Returns true if the call was resolved instantly
+ bool call_intrinsic(Value& ret_val, const ::std::string& name, const ::HIR::PathParams& pp, ::std::vector<Value> args);
+
+ // Returns true if the call was resolved instantly
+ bool drop_value(Value ptr, const ::HIR::TypeRef& ty, bool is_shallow=false);
+};
+
diff --git a/tools/standalone_miri/module_tree.cpp b/tools/standalone_miri/module_tree.cpp
index 2513140a..8beba018 100644
--- a/tools/standalone_miri/module_tree.cpp
+++ b/tools/standalone_miri/module_tree.cpp
@@ -5,6 +5,7 @@
#include "lex.hpp"
#include "value.hpp"
#include <iostream>
+#include <algorithm> // std::find
#include "debug.hpp"
ModuleTree::ModuleTree()
@@ -31,18 +32,19 @@ struct Parser
RawType parse_core_type();
::HIR::TypeRef parse_type();
::HIR::GenericPath parse_tuple();
+
+ const DataType* get_composite(::HIR::GenericPath gp);
};
void ModuleTree::load_file(const ::std::string& path)
{
if( !loaded_files.insert(path).second )
{
- ::std::cout << "DEBUG: load_file(" << path << ") - Already loaded" << ::std::endl;
+ LOG_DEBUG("load_file(" << path << ") - Already loaded");
return ;
}
- ::std::cout << "DEBUG: load_file(" << path << ")" << ::std::endl;
- //TRACE_FUNCTION_F(path);
+ TRACE_FUNCTION_R(path, "");
auto parse = Parser { *this, path };
while(parse.parse_one())
@@ -53,8 +55,8 @@ void ModuleTree::load_file(const ::std::string& path)
// Parse a single item from a .mir file
bool Parser::parse_one()
{
- //::std::cout << "DEBUG: parse_one" << ::std::endl;
- if( lex.next() == "" ) // EOF?
+ //TRACE_FUNCTION_F("");
+ if( lex.next() == TokenClass::Eof )
{
return false;
}
@@ -65,7 +67,7 @@ bool Parser::parse_one()
lex.check(TokenClass::String);
auto path = ::std::move(lex.next().strval);
lex.consume();
- //::std::cout << "DEBUG: parse_one - crate '" << path << "'" << ::std::endl;
+ //LOG_TRACE(lex << "crate '" << path << "'");
lex.check_consume(';');
@@ -75,7 +77,7 @@ bool Parser::parse_one()
else if( lex.consume_if("fn") )
{
auto p = parse_path();
- //::std::cout << "DEBUG: parse_one - fn " << p << ::std::endl;
+ //LOG_TRACE(lex << "fn " << p);
lex.check_consume('(');
::std::vector<::HIR::TypeRef> arg_tys;
@@ -91,7 +93,7 @@ bool Parser::parse_one()
{
rv_ty = parse_type();
}
-
+
if( lex.consume_if('=') )
{
auto link_name = ::std::move(lex.check_consume(TokenClass::String).strval);
@@ -99,6 +101,7 @@ bool Parser::parse_one()
auto abi = ::std::move(lex.check_consume(TokenClass::String).strval);
lex.check_consume(';');
+ LOG_DEBUG(lex << "extern fn " << p);
auto p2 = p;
tree.functions.insert( ::std::make_pair(::std::move(p), Function { ::std::move(p2), ::std::move(arg_tys), rv_ty, {link_name, abi}, {} }) );
}
@@ -106,6 +109,7 @@ bool Parser::parse_one()
{
auto body = parse_body();
+ LOG_DEBUG(lex << "fn " << p);
auto p2 = p;
tree.functions.insert( ::std::make_pair(::std::move(p), Function { ::std::move(p2), ::std::move(arg_tys), rv_ty, {}, ::std::move(body) }) );
}
@@ -113,13 +117,22 @@ bool Parser::parse_one()
else if( lex.consume_if("static") )
{
auto p = parse_path();
- //::std::cout << "DEBUG: parse_one - static " << p << ::std::endl;
+ //LOG_TRACE(lex << "static " << p);
lex.check_consume(':');
auto ty = parse_type();
// TODO: externs?
lex.check_consume('=');
lex.check(TokenClass::String);
auto data = ::std::move(lex.consume().strval);
+
+ Static s;
+ s.val = Value(ty);
+ // - Statics need to always have an allocation (for references)
+ if( !s.val.allocation )
+ s.val.create_allocation();
+ s.val.write_bytes(0, data.data(), data.size());
+ s.ty = ty;
+
if( lex.consume_if('{') )
{
while( !lex.consume_if('}') )
@@ -134,15 +147,20 @@ bool Parser::parse_one()
if( lex.next() == TokenClass::String )
{
auto reloc_str = ::std::move(lex.consume().strval);
- // TODO: Add relocation
+
+ auto a = Allocation::new_alloc( reloc_str.size() );
+ //a.alloc().set_tag();
+ a->write_bytes(0, reloc_str.data(), reloc_str.size());
+ s.val.allocation->relocations.push_back({ ofs, /*size,*/ RelocationPtr::new_alloc(::std::move(a)) });
}
- else if( lex.next() == "::" )
+ else if( lex.next() == "::" || lex.next() == "<" )
{
auto reloc_path = parse_path();
- // TODO: Add relocation
+ s.val.allocation->relocations.push_back({ ofs, /*size,*/ RelocationPtr::new_fcn(reloc_path) });
}
else
{
+ LOG_FATAL(lex << "Unexpected token " << lex.next() << " in relocation value");
throw "ERROR";
}
if( ! lex.consume_if(',') ) {
@@ -153,15 +171,13 @@ bool Parser::parse_one()
}
lex.check_consume(';');
- Value val = Value(ty);
- val.write_bytes(0, data.data(), data.size());
-
- tree.statics.insert(::std::make_pair( ::std::move(p), ::std::move(val) ));
+ LOG_DEBUG(lex << "static " << p);
+ tree.statics.insert(::std::make_pair( ::std::move(p), ::std::move(s) ));
}
else if( lex.consume_if("type") )
{
auto p = (lex.consume_if('(')) ? parse_tuple() : parse_genericpath();
- //::std::cout << "DEBUG: parse_one - type " << p << ::std::endl;
+ //LOG_TRACE("type " << p);
auto rv = DataType {};
rv.my_path = p;
@@ -206,7 +222,7 @@ bool Parser::parse_one()
lex.check_consume('=');
auto ty = parse_type();
lex.check_consume(';');
- //::std::cout << ofs << " " << ty << ::std::endl;
+ //LOG_DEBUG(ofs << " " << ty);
rv.fields.push_back(::std::make_pair(ofs, ::std::move(ty)));
}
@@ -250,7 +266,7 @@ bool Parser::parse_one()
//const auto* tag_ty = &rv.fields.at(base_idx).second;
//for(auto idx : other_idx)
//{
- // assert(tag_ty->wrappers.size() == 0);
+ // assert(tag_ty->get_wrapper() == nullptr);
// assert(tag_ty->inner_type == RawType::Composite);
// LOG_TODO(lex << "Calculate tag offset with nested tag - " << idx << " ty=" << *tag_ty);
//}
@@ -272,10 +288,10 @@ bool Parser::parse_one()
if( rv.alignment == 0 && rv.fields.size() != 0 )
{
- ::std::cerr << lex << "Alignment of zero with fields is invalid, " << p << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Alignment of zero with fields is invalid, " << p);
}
+ LOG_DEBUG(lex << "type " << p);
auto it = this->tree.data_types.find(p);
if( it != this->tree.data_types.end() )
{
@@ -285,10 +301,7 @@ bool Parser::parse_one()
}
else
{
- //::std::cerr << lex << "Duplicate definition of " << p << ::std::endl;
-
- // Not really an error, can happen when loading crates
- //throw "ERROR";
+ //LOG_ERROR(lex << "Duplicate definition of " << p);
}
}
else
@@ -298,7 +311,7 @@ bool Parser::parse_one()
}
else
{
- ::std::cerr << lex << "Unexpected token at root - " << lex.next() << ::std::endl;
+ LOG_ERROR(lex << "Unexpected token at root - " << lex.next());
// Unknown item type
throw "ERROR";
@@ -342,8 +355,7 @@ bool Parser::parse_one()
lv = ::MIR::LValue::make_Argument({ idx });
}
catch(const ::std::exception& e) {
- ::std::cerr << lex << "Invalid argument name - " << name << " - " << e.what() << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Invalid argument name - " << name << " - " << e.what());
}
}
// Hard-coded "RETURN" lvalue
@@ -354,8 +366,7 @@ bool Parser::parse_one()
else {
auto it = ::std::find(var_names.begin(), var_names.end(), name);
if( it == var_names.end() ) {
- ::std::cerr << lex << "Cannot find variable named '" << name << "'" << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Cannot find variable named '" << name << "'");
}
lv = ::MIR::LValue::make_Local(static_cast<unsigned>(it - var_names.begin()));
}
@@ -366,8 +377,7 @@ bool Parser::parse_one()
lv = ::MIR::LValue( ::std::move(path) );
}
else {
- ::std::cerr << lex << "Unexpected token in LValue - " << lex.next() << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unexpected token in LValue - " << lex.next());
}
for(;;)
{
@@ -437,8 +447,7 @@ bool Parser::parse_one()
}
else
{
- ::std::cerr << p.lex << "Expected an integer or float, got " << p.lex.next() << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(p.lex << "Expected an integer or float, got " << p.lex.next());
}
}
else if( p.lex.consume_if("true") ) {
@@ -453,8 +462,7 @@ bool Parser::parse_one()
return ::MIR::Constant::make_ItemAddr({ ::std::move(path) });
}
else {
- ::std::cerr << p.lex << "BUG? " << p.lex.next() << ::std::endl;
- throw "ERROR";
+ LOG_BUG(p.lex << "BUG? " << p.lex.next());
}
}
@@ -635,8 +643,7 @@ bool Parser::parse_one()
op = ::MIR::eUniOp::NEG;
}
else {
- ::std::cerr << lex << "Unexpected token in uniop - " << lex.next() << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unexpected token in uniop - " << lex.next());
}
auto lv = H::parse_lvalue(*this, var_names);
@@ -661,7 +668,7 @@ bool Parser::parse_one()
case '<':
if( t.strval[1] == '<' )
op = ::MIR::eBinOp::BIT_SHL;
- else if( lex.consume_if('=') )
+ else if( t.strval[1] == '=' )
op = ::MIR::eBinOp::LE;
else
op = ::MIR::eBinOp::LT;
@@ -683,8 +690,7 @@ bool Parser::parse_one()
lex.check_consume('=');
break;
default:
- ::std::cerr << lex << "Unexpected token " << t << " in BINOP" << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unexpected token " << t << " in BINOP");
}
auto lv2 = H::parse_param(*this, var_names);
@@ -705,8 +711,7 @@ bool Parser::parse_one()
src_rval = ::MIR::RValue::make_DstMeta({ ::std::move(lv) });
}
else {
- ::std::cerr << lex << "Unexpected token in RValue - " << lex.next() << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unexpected token in RValue - " << lex.next());
}
stmts.push_back(::MIR::Statement::make_Assign({ ::std::move(dst_val), ::std::move(src_rval) }));
@@ -717,8 +722,7 @@ bool Parser::parse_one()
auto name = ::std::move(lex.consume().strval);
auto df_it = ::std::find(drop_flag_names.begin(), drop_flag_names.end(), name);
if( df_it == drop_flag_names.end() ) {
- ::std::cerr << lex << "Unable to find drop flag '" << name << "'" << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unable to find drop flag '" << name << "'");
}
auto df_idx = static_cast<unsigned>( df_it - drop_flag_names.begin() );
lex.check_consume('=');
@@ -736,8 +740,7 @@ bool Parser::parse_one()
auto name = ::std::move(lex.consume().strval);
df_it = ::std::find(drop_flag_names.begin(), drop_flag_names.end(), name);
if( df_it == drop_flag_names.end() ) {
- ::std::cerr << lex << "Unable to find drop flag '" << name << "'" << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unable to find drop flag '" << name << "'");
}
auto other_idx = static_cast<unsigned>( df_it - drop_flag_names.begin() );
@@ -759,8 +762,7 @@ bool Parser::parse_one()
auto name = ::std::move(lex.consume().strval);
auto df_it = ::std::find(drop_flag_names.begin(), drop_flag_names.end(), name);
if( df_it == drop_flag_names.end() ) {
- ::std::cerr << lex << "Unable to find drop flag '" << name << "'" << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unable to find drop flag '" << name << "'");
}
flag_idx = static_cast<unsigned>( df_it - drop_flag_names.begin() );
}
@@ -824,7 +826,7 @@ bool Parser::parse_one()
break;
}
lex.check_consume(';');
- //::std::cout << stmts.back() << ::std::endl;
+ //LOG_TRACE(stmts.back());
}
lex.check(TokenClass::Ident);
@@ -910,8 +912,7 @@ bool Parser::parse_one()
vals = ::MIR::SwitchValues::make_String(::std::move(values));
}
else {
- ::std::cerr << lex << "Unexpected token for SWITCHVALUE value - " << lex.next() << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unexpected token for SWITCHVALUE value - " << lex.next());
}
lex.check_consume('_');
lex.check_consume('=');
@@ -958,8 +959,7 @@ bool Parser::parse_one()
}
else
{
- ::std::cerr << lex << "Unexpected token at terminator - " << lex.next() << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unexpected token at terminator - " << lex.next());
}
lex.check_consume('}');
@@ -1050,7 +1050,7 @@ bool Parser::parse_one()
}
RawType Parser::parse_core_type()
{
- //::std::cout << lex.next() << ::std::endl;
+ //LOG_TRACE(lex.next());
lex.check(TokenClass::Ident);
auto tok = lex.consume();
// Primitive type.
@@ -1106,8 +1106,7 @@ RawType Parser::parse_core_type()
return RawType::Str;
}
else {
- ::std::cerr << lex << "Unknown core type " << tok << "'" << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unknown core type " << tok << "'");
}
}
::HIR::TypeRef Parser::parse_type()
@@ -1121,19 +1120,8 @@ RawType Parser::parse_core_type()
// Tuples! Should point to a composite
::HIR::GenericPath gp = parse_tuple();
- // Look up this type, then create a TypeRef referring to the type in the datastore
- // - May need to create an unpopulated type?
- auto it = tree.data_types.find(gp);
- if( it == tree.data_types.end() )
- {
- // TODO: Later on need to check if the type is valid.
- auto v = ::std::make_unique<DataType>(DataType {});
- v->my_path = gp;
- auto ir = tree.data_types.insert(::std::make_pair( ::std::move(gp), ::std::move(v)) );
- it = ir.first;
- }
// Good.
- return ::HIR::TypeRef(it->second.get());
+ return ::HIR::TypeRef( this->get_composite(::std::move(gp)) );
}
else if( lex.consume_if('[') )
{
@@ -1142,15 +1130,14 @@ RawType Parser::parse_core_type()
{
size_t size = lex.next().integer();
lex.consume();
- rv.wrappers.insert( rv.wrappers.begin(), { TypeWrapper::Ty::Array, size });
+ lex.check_consume(']');
+ return ::std::move(rv).wrap( TypeWrapper::Ty::Array, size );
}
else
{
- // TODO: How to handle arrays?
- rv.wrappers.insert( rv.wrappers.begin(), { TypeWrapper::Ty::Slice, 0 });
+ lex.check_consume(']');
+ return ::std::move(rv).wrap( TypeWrapper::Ty::Slice, 0 );
}
- lex.check_consume(']');
- return rv;
}
else if( lex.consume_if('!') )
{
@@ -1158,6 +1145,11 @@ RawType Parser::parse_core_type()
}
else if( lex.consume_if('&') )
{
+ if( lex.next() == TokenClass::Lifetime )
+ {
+ // TODO: Handle lifetime names (require them?)
+ lex.consume();
+ }
auto bt = ::HIR::BorrowType::Shared;
if( lex.consume_if("move") )
bt = ::HIR::BorrowType::Move;
@@ -1165,9 +1157,7 @@ RawType Parser::parse_core_type()
bt = ::HIR::BorrowType::Unique;
else
; // keep as shared
- auto rv = parse_type();
- rv.wrappers.insert( rv.wrappers.begin(), { TypeWrapper::Ty::Borrow, static_cast<size_t>(bt) });
- return rv;
+ return parse_type().wrap( TypeWrapper::Ty::Borrow, static_cast<size_t>(bt) );
}
else if( lex.consume_if('*') )
{
@@ -1180,26 +1170,12 @@ RawType Parser::parse_core_type()
; // keep as shared
else
throw "ERROR";
- auto rv = parse_type();
- rv.wrappers.insert( rv.wrappers.begin(), { TypeWrapper::Ty::Pointer, static_cast<size_t>(bt) });
- return rv;
+ return parse_type().wrap( TypeWrapper::Ty::Pointer, static_cast<size_t>(bt) );
}
else if( lex.next() == "::" )
{
auto path = parse_genericpath();
- // Look up this type, then create a TypeRef referring to the type in the datastore
- // - May need to create an unpopulated type?
- auto it = tree.data_types.find(path);
- if( it == tree.data_types.end() )
- {
- // TODO: Later on need to check if the type is valid.
- auto v = ::std::make_unique<DataType>(DataType {});
- v->my_path = path;
- auto ir = tree.data_types.insert(::std::make_pair( ::std::move(path), ::std::move(v)) );
- it = ir.first;
- }
- // Good.
- return ::HIR::TypeRef(it->second.get());
+ return ::HIR::TypeRef( this->get_composite(::std::move(path)));
}
else if( lex.next() == "extern" || lex.next() == "fn" || lex.next() == "unsafe" )
{
@@ -1269,12 +1245,28 @@ RawType Parser::parse_core_type()
::std::vector<::HIR::GenericPath> markers;
while(lex.consume_if('+'))
{
- // TODO: Detect/parse lifetimes?
- markers.push_back(parse_genericpath());
+ if( lex.next() == TokenClass::Lifetime )
+ {
+ // TODO: Include lifetimes in output?
+ lex.consume();
+ }
+ else
+ {
+ markers.push_back(parse_genericpath());
+ }
}
lex.consume_if(')');
- return ::HIR::TypeRef(RawType::TraitObject);
- // TODO: Generate the vtable path and locate that struct
+
+ auto rv = ::HIR::TypeRef(RawType::TraitObject);
+ if( base_trait != ::HIR::GenericPath() )
+ {
+ // Generate vtable path
+ auto vtable_path = base_trait;
+ vtable_path.m_simplepath.ents.back() += "#vtable";
+ // - TODO: Associated types?
+ rv.composite_type = this->get_composite( ::std::move(vtable_path) );
+ }
+ return rv;
}
else if( lex.next() == TokenClass::Ident )
{
@@ -1282,10 +1274,22 @@ RawType Parser::parse_core_type()
}
else
{
- ::std::cerr << lex << "Unexpected token in type - " << lex.next() << ::std::endl;
- throw "ERROR";
+ LOG_ERROR(lex << "Unexpected token in type - " << lex.next());
}
}
+const DataType* Parser::get_composite(::HIR::GenericPath gp)
+{
+ auto it = tree.data_types.find(gp);
+ if( it == tree.data_types.end() )
+ {
+ // TODO: Later on need to check if the type is valid.
+ auto v = ::std::make_unique<DataType>(DataType {});
+ v->my_path = gp;
+ auto ir = tree.data_types.insert(::std::make_pair( ::std::move(gp), ::std::move(v)) );
+ it = ir.first;
+ }
+ return it->second.get();
+}
::HIR::SimplePath ModuleTree::find_lang_item(const char* name) const
{
@@ -1296,8 +1300,7 @@ const Function& ModuleTree::get_function(const ::HIR::Path& p) const
auto it = functions.find(p);
if(it == functions.end())
{
- ::std::cerr << "Unable to find function " << p << " for invoke" << ::std::endl;
- throw "";
+ LOG_ERROR("Unable to find function " << p << " for invoke");
}
return it->second;
}
@@ -1310,17 +1313,16 @@ const Function* ModuleTree::get_function_opt(const ::HIR::Path& p) const
}
return &it->second;
}
-Value& ModuleTree::get_static(const ::HIR::Path& p)
+Static& ModuleTree::get_static(const ::HIR::Path& p)
{
auto it = statics.find(p);
if(it == statics.end())
{
- ::std::cerr << "Unable to find static " << p << " for invoke" << ::std::endl;
- throw "";
+ LOG_ERROR("Unable to find static " << p << " for invoke");
}
return it->second;
}
-Value* ModuleTree::get_static_opt(const ::HIR::Path& p)
+Static* ModuleTree::get_static_opt(const ::HIR::Path& p)
{
auto it = statics.find(p);
if(it == statics.end())
@@ -1328,4 +1330,4 @@ Value* ModuleTree::get_static_opt(const ::HIR::Path& p)
return nullptr;
}
return &it->second;
-} \ No newline at end of file
+}
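
A condensed restatement of the new `static` handling above (illustrative only; `ty`, `data`, `reloc_str`, and `ofs` stand in for the values the parser reads). It shows how a static's data gets a real backing allocation and how a string relocation becomes an out-of-line allocation referenced from it:

    // Sketch under stated assumptions, not part of the patch.
    #include "module_tree.hpp"

    static Static make_static(::HIR::TypeRef ty, const ::std::string& data,
                              const ::std::string& reloc_str, size_t ofs)
    {
        Static s;
        s.ty  = ty;
        s.val = Value(ty);
        if( !s.val.allocation )
            s.val.create_allocation();      // statics always need an allocation (for references)
        s.val.write_bytes(0, data.data(), data.size());

        // A string relocation gets its own backing allocation...
        auto a = Allocation::new_alloc( reloc_str.size() );
        a->write_bytes(0, reloc_str.data(), reloc_str.size());
        // ...recorded as a pointer at offset `ofs` within the static's data
        s.val.allocation->relocations.push_back({ ofs, RelocationPtr::new_alloc(::std::move(a)) });
        return s;
    }
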
diff --git a/tools/standalone_miri/module_tree.hpp b/tools/standalone_miri/module_tree.hpp
index ca24b06a..5479d9ef 100644
--- a/tools/standalone_miri/module_tree.hpp
+++ b/tools/standalone_miri/module_tree.hpp
@@ -9,15 +9,14 @@
#include "../../src/mir/mir.hpp"
#include "hir_sim.hpp"
-
-struct Value;
+#include "value.hpp"
struct Function
{
::HIR::Path my_path;
::std::vector<::HIR::TypeRef> args;
::HIR::TypeRef ret_ty;
-
+
// If `link_name` is non-empty, then the function is an external
struct {
::std::string link_name;
@@ -25,6 +24,11 @@ struct Function
} external;
::MIR::Function m_mir;
};
+struct Static
+{
+ ::HIR::TypeRef ty;
+ Value val;
+};
/// Container for loaded code and structures
class ModuleTree
@@ -34,8 +38,7 @@ class ModuleTree
::std::set<::std::string> loaded_files;
::std::map<::HIR::Path, Function> functions;
- ::std::map<::HIR::Path, Value> statics;
- // TODO: statics
+ ::std::map<::HIR::Path, Static> statics;
// Hack: Tuples are stored as `::""::<A,B,C,...>`
::std::map<::HIR::GenericPath, ::std::unique_ptr<DataType>> data_types;
@@ -47,8 +50,8 @@ public:
::HIR::SimplePath find_lang_item(const char* name) const;
const Function& get_function(const ::HIR::Path& p) const;
const Function* get_function_opt(const ::HIR::Path& p) const;
- Value& get_static(const ::HIR::Path& p);
- Value* get_static_opt(const ::HIR::Path& p);
+ Static& get_static(const ::HIR::Path& p);
+ Static* get_static_opt(const ::HIR::Path& p);
const DataType& get_composite(const ::HIR::GenericPath& p) const {
return *data_types.at(p);
diff --git a/tools/standalone_miri/value.cpp b/tools/standalone_miri/value.cpp
index beff8a38..cf378077 100644
--- a/tools/standalone_miri/value.cpp
+++ b/tools/standalone_miri/value.cpp
@@ -1,6 +1,10 @@
-//
-//
-//
+/*
+ * mrustc Standalone MIRI
+ * - by John Hodge (Mutabah)
+ *
+ * value.cpp
+ * - Runtime values
+ */
#include "value.hpp"
#include "hir_sim.hpp"
#include "module_tree.hpp"
@@ -9,49 +13,79 @@
#include <algorithm>
#include "debug.hpp"
-AllocationPtr Allocation::new_alloc(size_t size)
+AllocationHandle Allocation::new_alloc(size_t size)
{
Allocation* rv = new Allocation();
rv->refcount = 1;
rv->data.resize( (size + 8-1) / 8 ); // QWORDS
rv->mask.resize( (size + 8-1) / 8 ); // bitmap bytes
//LOG_DEBUG(rv << " ALLOC");
- return AllocationPtr(rv);
+ return AllocationHandle(rv);
}
-AllocationPtr AllocationPtr::new_fcn(::HIR::Path p)
+AllocationHandle::AllocationHandle(const AllocationHandle& x):
+ m_ptr(x.m_ptr)
{
- AllocationPtr rv;
+ if( m_ptr )
+ {
+ assert(m_ptr->refcount != 0);
+ assert(m_ptr->refcount != SIZE_MAX);
+ m_ptr->refcount += 1;
+ //LOG_DEBUG(m_ptr << " REF++ " << m_ptr->refcount);
+ }
+}
+AllocationHandle::~AllocationHandle()
+{
+ if( m_ptr )
+ {
+ m_ptr->refcount -= 1;
+ //LOG_DEBUG(m_ptr << " REF-- " << m_ptr->refcount);
+ if(m_ptr->refcount == 0)
+ {
+ delete m_ptr;
+ }
+ }
+}
+
+RelocationPtr RelocationPtr::new_alloc(AllocationHandle alloc)
+{
+ RelocationPtr rv;
+ auto* ptr = alloc.m_ptr;
+ alloc.m_ptr = nullptr;
+ rv.m_ptr = reinterpret_cast<void*>( reinterpret_cast<uintptr_t>(ptr) + static_cast<uintptr_t>(Ty::Allocation) );
+ return rv;
+}
+RelocationPtr RelocationPtr::new_fcn(::HIR::Path p)
+{
+ RelocationPtr rv;
auto* ptr = new ::HIR::Path(::std::move(p));
rv.m_ptr = reinterpret_cast<void*>( reinterpret_cast<uintptr_t>(ptr) + static_cast<uintptr_t>(Ty::Function) );
return rv;
}
-AllocationPtr AllocationPtr::new_string(const ::std::string* ptr)
+RelocationPtr RelocationPtr::new_string(const ::std::string* ptr)
{
- AllocationPtr rv;
+ RelocationPtr rv;
rv.m_ptr = reinterpret_cast<void*>( reinterpret_cast<uintptr_t>(ptr) + static_cast<uintptr_t>(Ty::StdString) );
return rv;
}
-AllocationPtr AllocationPtr::new_ffi(FFIPointer info)
+RelocationPtr RelocationPtr::new_ffi(FFIPointer info)
{
- AllocationPtr rv;
+ RelocationPtr rv;
auto* ptr = new FFIPointer(info);
rv.m_ptr = reinterpret_cast<void*>( reinterpret_cast<uintptr_t>(ptr) + static_cast<uintptr_t>(Ty::FfiPointer) );
return rv;
}
-AllocationPtr::AllocationPtr(const AllocationPtr& x):
+RelocationPtr::RelocationPtr(const RelocationPtr& x):
m_ptr(nullptr)
{
if( x )
{
switch(x.get_ty())
{
- case Ty::Allocation:
- m_ptr = x.m_ptr;
- assert(alloc().refcount != 0);
- assert(alloc().refcount != SIZE_MAX);
- alloc().refcount += 1;
- //LOG_DEBUG(&alloc() << " REF++ " << alloc().refcount);
- break;
+ case Ty::Allocation: {
+ auto tmp = AllocationHandle( reinterpret_cast<Allocation*>(x.get_ptr()) );
+ *this = RelocationPtr::new_alloc(tmp);
+ tmp.m_ptr = nullptr;
+ } break;
case Ty::Function: {
auto ptr_i = reinterpret_cast<uintptr_t>(new ::HIR::Path(x.fcn()));
assert( (ptr_i & 3) == 0 );
@@ -75,21 +109,15 @@ AllocationPtr::AllocationPtr(const AllocationPtr& x):
m_ptr = nullptr;
}
}
-AllocationPtr::~AllocationPtr()
+RelocationPtr::~RelocationPtr()
{
if( *this )
{
switch(get_ty())
{
- case Ty::Allocation: {
- auto* ptr = &alloc();
- ptr->refcount -= 1;
- //LOG_DEBUG(&alloc() << " REF-- " << ptr->refcount);
- if(ptr->refcount == 0)
- {
- delete ptr;
- }
- } break;
+ case Ty::Allocation:
+ (void)AllocationHandle( reinterpret_cast<Allocation*>(get_ptr()) );
+ break;
case Ty::Function: {
auto* ptr = const_cast<::HIR::Path*>(&fcn());
delete ptr;
@@ -104,7 +132,7 @@ AllocationPtr::~AllocationPtr()
}
}
}
-size_t AllocationPtr::get_size() const
+size_t RelocationPtr::get_size() const
{
if( !*this )
return 0;
@@ -123,22 +151,22 @@ size_t AllocationPtr::get_size() const
throw "Unreachable";
}
-::std::ostream& operator<<(::std::ostream& os, const AllocationPtr& x)
+::std::ostream& operator<<(::std::ostream& os, const RelocationPtr& x)
{
if( x )
{
switch(x.get_ty())
{
- case AllocationPtr::Ty::Allocation:
+ case RelocationPtr::Ty::Allocation:
os << &x.alloc();
break;
- case AllocationPtr::Ty::Function:
+ case RelocationPtr::Ty::Function:
os << x.fcn();
break;
- case AllocationPtr::Ty::StdString:
+ case RelocationPtr::Ty::StdString:
os << "\"" << x.str() << "\"";
break;
- case AllocationPtr::Ty::FfiPointer:
+ case RelocationPtr::Ty::FfiPointer:
os << "FFI " << x.ffi().source_function << " " << x.ffi().ptr_value;
break;
}
@@ -150,9 +178,93 @@ size_t AllocationPtr::get_size() const
return os;
}
+uint64_t ValueCommonRead::read_usize(size_t ofs) const
+{
+ uint64_t v = 0;
+ this->read_bytes(ofs, &v, POINTER_SIZE);
+ return v;
+}
+void ValueCommonWrite::write_usize(size_t ofs, uint64_t v)
+{
+ this->write_bytes(ofs, &v, POINTER_SIZE);
+}
+void* ValueCommonRead::read_pointer_unsafe(size_t rd_ofs, size_t req_valid, size_t& out_size, bool& out_is_mut) const
+{
+ auto ofs = read_usize(rd_ofs);
+ auto reloc = get_relocation(rd_ofs);
+ if( !reloc )
+ {
+ if( ofs != 0 ) {
+ LOG_FATAL("Read a non-zero offset with no relocation");
+ }
+ if( req_valid > 0 ) {
+ LOG_ERROR("Attempting to read a null pointer");
+ }
+ out_is_mut = false;
+ out_size = 0;
+ return nullptr;
+ }
+ else
+ {
+ switch(reloc.get_ty())
+ {
+ case RelocationPtr::Ty::Allocation: {
+ auto& a = reloc.alloc();
+ if( ofs > a.size() )
+ LOG_FATAL("Out-of-bounds pointer");
+ if( ofs + req_valid > a.size() )
+ LOG_FATAL("Out-of-bounds pointer (" << ofs << " + " << req_valid << " > " << a.size());
+ a.check_bytes_valid( ofs, req_valid );
+ out_size = a.size() - ofs;
+ out_is_mut = true;
+ return a.data_ptr() + ofs;
+ }
+ case RelocationPtr::Ty::StdString: {
+ const auto& s = reloc.str();
+ if( ofs > s.size() )
+ LOG_FATAL("Out-of-bounds pointer");
+ if( ofs + req_valid > s.size() )
+ LOG_FATAL("Out-of-bounds pointer (" << ofs << " + " << req_valid << " > " << s.size());
+ out_size = s.size() - ofs;
+ out_is_mut = false;
+ return const_cast<void*>( static_cast<const void*>(s.data() + ofs) );
+ }
+ case RelocationPtr::Ty::Function:
+ LOG_FATAL("read_pointer w/ function");
+ case RelocationPtr::Ty::FfiPointer: {
+ const auto& f = reloc.ffi();
+ // TODO: Validity?
+ //if( req_valid )
+ // LOG_FATAL("Can't request valid data from a FFI pointer");
+ // TODO: Have an idea of mutability and available size from FFI
+ out_size = f.size - ofs;
+ out_is_mut = false;
+ return reloc.ffi().ptr_value + ofs;
+ }
+ }
+ throw "";
+ }
+}
+ValueRef ValueCommonRead::read_pointer_valref_mut(size_t rd_ofs, size_t size)
+{
+ auto ofs = read_usize(rd_ofs);
+ auto reloc = get_relocation(rd_ofs);
+ if( !reloc )
+ {
+ LOG_ERROR("Getting ValRef to null pointer (no relocation)");
+ }
+ else
+ {
+ // TODO: Validate size
+ return ValueRef(reloc, ofs, size);
+ }
+}
+
void Allocation::resize(size_t new_size)
{
+ if( this->is_freed )
+ LOG_ERROR("Use of freed memory " << this);
//size_t old_size = this->size();
//size_t extra_bytes = (new_size > old_size ? new_size - old_size : 0);
@@ -167,9 +279,9 @@ void Allocation::check_bytes_valid(size_t ofs, size_t size) const
}
for(size_t i = ofs; i < ofs + size; i++)
{
- if( !(this->mask[i/8] & (1 << i%8)) )
+ if( !(this->mask[i/8] & (1 << (i%8))) )
{
- ::std::cerr << "ERROR: Invalid bytes in value" << ::std::endl;
+ LOG_ERROR("Invalid bytes in value - " << ofs << "+" << size << " - " << *this);
throw "ERROR";
}
}
@@ -179,14 +291,18 @@ void Allocation::mark_bytes_valid(size_t ofs, size_t size)
assert( ofs+size <= this->mask.size() * 8 );
for(size_t i = ofs; i < ofs + size; i++)
{
- this->mask[i/8] |= (1 << i%8);
+ this->mask[i/8] |= (1 << (i%8));
}
}
Value Allocation::read_value(size_t ofs, size_t size) const
{
Value rv;
+ TRACE_FUNCTION_R("Allocation::read_value " << this << " " << ofs << "+" << size, *this << " | " << rv);
+ if( this->is_freed )
+ LOG_ERROR("Use of freed memory " << this);
+ LOG_DEBUG(*this);
- // TODO: Determine if this can become an inline allocation.
+ // Determine if this can become an inline allocation.
bool has_reloc = false;
for(const auto& r : this->relocations)
{
@@ -205,7 +321,7 @@ Value Allocation::read_value(size_t ofs, size_t size) const
{
if( ofs <= r.slot_ofs && r.slot_ofs < ofs + size )
{
- rv.allocation.alloc().relocations.push_back({ r.slot_ofs - ofs, r.backing_alloc });
+ rv.allocation->relocations.push_back({ r.slot_ofs - ofs, r.backing_alloc });
}
}
@@ -213,14 +329,12 @@ Value Allocation::read_value(size_t ofs, size_t size) const
for(size_t i = 0; i < size; i ++)
{
size_t j = ofs + i;
- bool v = (this->mask[j/8] & (1 << j%8)) != 0;
+ const uint8_t test_mask = (1 << (j%8));
+ const uint8_t set_mask = (1 << (i%8));
+ bool v = (this->mask[j/8] & test_mask) != 0;
if( v )
{
- rv.allocation.alloc().mask[i/8] |= (1 << i%8);
- }
- else
- {
- rv.allocation.alloc().mask[i/8] &= ~(1 << i%8);
+ rv.allocation->mask[i/8] |= set_mask;
}
}
}
@@ -236,21 +350,23 @@ Value Allocation::read_value(size_t ofs, size_t size) const
for(size_t i = 0; i < size; i ++)
{
size_t j = ofs + i;
- bool v = (this->mask[j/8] & (1 << j%8)) != 0;
+ const uint8_t tst_mask = 1 << (j%8);
+ const uint8_t set_mask = 1 << (i%8);
+ bool v = (this->mask[j/8] & tst_mask) != 0;
if( v )
{
- rv.direct_data.mask[i/8] |= (1 << i%8);
+ rv.direct_data.mask[i/8] |= set_mask;
}
- //else
- //{
- // rv.direct_data.mask[i/8] &= ~(1 << i%8);
- //}
}
}
return rv;
}
void Allocation::read_bytes(size_t ofs, void* dst, size_t count) const
{
+ if( this->is_freed )
+ LOG_ERROR("Use of freed memory " << this);
+
+ LOG_DEBUG("Allocation::read_bytes " << this << " " << ofs << "+" << count);
if(count == 0)
return ;
@@ -273,10 +389,15 @@ void Allocation::read_bytes(size_t ofs, void* dst, size_t count) const
}
void Allocation::write_value(size_t ofs, Value v)
{
+ TRACE_FUNCTION_R("Allocation::write_value " << this << " " << ofs << "+" << v.size() << " " << v, *this);
+ if( this->is_freed )
+ LOG_ERROR("Use of freed memory " << this);
+ //if( this->is_read_only )
+ // LOG_ERROR("Writing to read-only allocation " << this);
if( v.allocation )
{
- size_t v_size = v.allocation.alloc().size();
- const auto& src_alloc = v.allocation.alloc();
+ size_t v_size = v.allocation->size();
+ const auto& src_alloc = *v.allocation;
// Take a copy of the source mask
auto s_mask = src_alloc.mask;
@@ -337,8 +458,15 @@ void Allocation::write_value(size_t ofs, Value v)
}
void Allocation::write_bytes(size_t ofs, const void* src, size_t count)
{
+ //LOG_DEBUG("Allocation::write_bytes " << this << " " << ofs << "+" << count);
+ if( this->is_freed )
+ LOG_ERROR("Use of freed memory " << this);
+ //if( this->is_read_only )
+ // LOG_ERROR("Writing to read-only allocation " << this);
+
if(count == 0)
return ;
+ TRACE_FUNCTION_R("Allocation::write_bytes " << this << " " << ofs << "+" << count, *this);
if(ofs >= this->size() ) {
LOG_ERROR("Out of bounds write, " << ofs << "+" << count << " > " << this->size());
throw "ERROR";
@@ -371,9 +499,10 @@ void Allocation::write_bytes(size_t ofs, const void* src, size_t count)
::std::memcpy(this->data_ptr() + ofs, src, count);
mark_bytes_valid(ofs, count);
}
-void Allocation::write_usize(size_t ofs, uint64_t v)
+void Allocation::write_ptr(size_t ofs, size_t ptr_ofs, RelocationPtr reloc)
{
- this->write_bytes(ofs, &v, POINTER_SIZE);
+ this->write_usize(ofs, ptr_ofs);
+ this->relocations.push_back(Relocation { ofs, /*POINTER_SIZE,*/ ::std::move(reloc) });
}
::std::ostream& operator<<(::std::ostream& os, const Allocation& x)
{
@@ -384,7 +513,7 @@ void Allocation::write_usize(size_t ofs, uint64_t v)
if( i != 0 )
os << " ";
- if( x.mask[i/8] & (1 << i%8) )
+ if( x.mask[i/8] & (1 << (i%8)) )
{
os << ::std::setw(2) << ::std::setfill('0') << (int)x.data_ptr()[i];
}
@@ -416,38 +545,12 @@ Value::Value()
Value::Value(::HIR::TypeRef ty)
{
size_t size = ty.get_size();
-#if 1
+
// Support inline data if the data will fit within the inline region (which is the size of the metadata)
if( ty.get_size() <= sizeof(this->direct_data.data) )
{
- struct H
- {
- static bool has_pointer(const ::HIR::TypeRef& ty)
- {
- if( ty.wrappers.empty() || ::std::all_of(ty.wrappers.begin(), ty.wrappers.end(), [](const auto& x){ return x.type == TypeWrapper::Ty::Array; }) )
- {
- // TODO: Function pointers should be _pointers_
- if( ty.inner_type == RawType::Function )
- {
- return true;
- }
- // Check the inner type
- if( ty.inner_type != RawType::Composite )
- {
- return false;
- }
- // Still not sure, check the inner for any pointers.
- for(const auto& fld : ty.composite_type->fields)
- {
- if( H::has_pointer(fld.second) )
- return true;
- }
- return false;
- }
- return true;
- }
- };
- if( ! H::has_pointer(ty) )
+ // AND the type doesn't contain a pointer (of any kind)
+ if( ! ty.has_pointer() )
{
// Will fit in a inline allocation, nice.
//LOG_TRACE("No pointers in " << ty << ", storing inline");
@@ -457,7 +560,6 @@ Value::Value(::HIR::TypeRef ty)
return ;
}
}
-#endif
// Fallback: Make a new allocation
//LOG_TRACE(" Creating allocation for " << ty);
@@ -482,18 +584,46 @@ Value Value::new_fnptr(const ::HIR::Path& fn_path)
{
Value rv( ::HIR::TypeRef(::HIR::CoreType { RawType::Function }) );
assert(rv.allocation);
- rv.allocation.alloc().relocations.push_back(Relocation { 0, AllocationPtr::new_fcn(fn_path) });
- rv.allocation.alloc().data.at(0) = 0;
- rv.allocation.alloc().mask.at(0) = 0xFF; // TODO: Get pointer size and make that much valid instead of 8 bytes
+ rv.allocation->relocations.push_back(Relocation { 0, RelocationPtr::new_fcn(fn_path) });
+ rv.allocation->data.at(0) = 0;
+ rv.allocation->mask.at(0) = 0xFF; // TODO: Get pointer size and make that much valid instead of 8 bytes
return rv;
}
Value Value::new_ffiptr(FFIPointer ffi)
{
Value rv( ::HIR::TypeRef(::HIR::CoreType { RawType::USize }) );
rv.create_allocation();
- rv.allocation.alloc().relocations.push_back(Relocation { 0, AllocationPtr::new_ffi(ffi) });
- rv.allocation.alloc().data.at(0) = 0;
- rv.allocation.alloc().mask.at(0) = 0xFF; // TODO: Get pointer size and make that much valid instead of 8 bytes
+ rv.allocation->relocations.push_back(Relocation { 0, RelocationPtr::new_ffi(ffi) });
+ rv.allocation->data.at(0) = 0;
+ rv.allocation->mask.at(0) = 0xFF; // TODO: Get pointer size and make that much valid instead of 8 bytes
+ return rv;
+}
+Value Value::new_pointer(::HIR::TypeRef ty, uint64_t v, RelocationPtr r) {
+ assert(ty.get_wrapper());
+ assert(ty.get_wrapper()->type == TypeWrapper::Ty::Borrow || ty.get_wrapper()->type == TypeWrapper::Ty::Pointer);
+ Value rv(ty);
+ rv.write_usize(0, v);
+ rv.allocation->relocations.push_back(Relocation { 0, /*POINTER_SIZE,*/ ::std::move(r) });
+ return rv;
+}
+Value Value::new_usize(uint64_t v) {
+ Value rv( ::HIR::TypeRef(RawType::USize) );
+ rv.write_usize(0, v);
+ return rv;
+}
+Value Value::new_isize(int64_t v) {
+ Value rv( ::HIR::TypeRef(RawType::ISize) );
+ rv.write_isize(0, v);
+ return rv;
+}
+Value Value::new_u32(uint32_t v) {
+ Value rv( ::HIR::TypeRef(RawType::U32) );
+ rv.write_u32(0, v);
+ return rv;
+}
+Value Value::new_i32(int32_t v) {
+ Value rv( ::HIR::TypeRef(RawType::I32) );
+ rv.write_i32(0, v);
return rv;
}
@@ -502,10 +632,10 @@ void Value::create_allocation()
assert(!this->allocation);
this->allocation = Allocation::new_alloc(this->direct_data.size);
if( this->direct_data.size > 0 )
- this->allocation.alloc().mask[0] = this->direct_data.mask[0];
+ this->allocation->mask[0] = this->direct_data.mask[0];
if( this->direct_data.size > 8 )
- this->allocation.alloc().mask[1] = this->direct_data.mask[1];
- ::std::memcpy(this->allocation.alloc().data.data(), this->direct_data.data, this->direct_data.size);
+ this->allocation->mask[1] = this->direct_data.mask[1];
+ ::std::memcpy(this->allocation->data.data(), this->direct_data.data, this->direct_data.size);
}
void Value::check_bytes_valid(size_t ofs, size_t size) const
{
@@ -513,7 +643,7 @@ void Value::check_bytes_valid(size_t ofs, size_t size) const
return ;
if( this->allocation )
{
- this->allocation.alloc().check_bytes_valid(ofs, size);
+ this->allocation->check_bytes_valid(ofs, size);
}
else
{
@@ -542,7 +672,7 @@ void Value::mark_bytes_valid(size_t ofs, size_t size)
{
if( this->allocation )
{
- this->allocation.alloc().mark_bytes_valid(ofs, size);
+ this->allocation->mark_bytes_valid(ofs, size);
}
else
{
@@ -559,7 +689,7 @@ Value Value::read_value(size_t ofs, size_t size) const
//TRACE_FUNCTION_R(ofs << ", " << size << ") - " << *this, rv);
if( this->allocation )
{
- rv = this->allocation.alloc().read_value(ofs, size);
+ rv = this->allocation->read_value(ofs, size);
}
else
{
@@ -577,7 +707,7 @@ void Value::read_bytes(size_t ofs, void* dst, size_t count) const
return ;
if( this->allocation )
{
- this->allocation.alloc().read_bytes(ofs, dst, count);
+ this->allocation->read_bytes(ofs, dst, count);
}
else
{
@@ -605,7 +735,7 @@ void Value::write_bytes(size_t ofs, const void* src, size_t count)
return ;
if( this->allocation )
{
- this->allocation.alloc().write_bytes(ofs, src, count);
+ this->allocation->write_bytes(ofs, src, count);
}
else
{
@@ -626,14 +756,14 @@ void Value::write_value(size_t ofs, Value v)
{
if( this->allocation )
{
- this->allocation.alloc().write_value(ofs, ::std::move(v));
+ this->allocation->write_value(ofs, ::std::move(v));
}
else
{
- if( v.allocation && !v.allocation.alloc().relocations.empty() )
+ if( v.allocation && !v.allocation->relocations.empty() )
{
this->create_allocation();
- this->allocation.alloc().write_value(ofs, ::std::move(v));
+ this->allocation->write_value(ofs, ::std::move(v));
}
else
{
@@ -651,16 +781,20 @@ void Value::write_value(size_t ofs, Value v)
}
}
}
-void Value::write_usize(size_t ofs, uint64_t v)
+void Value::write_ptr(size_t ofs, size_t ptr_ofs, RelocationPtr reloc)
{
- this->write_bytes(ofs, &v, POINTER_SIZE);
+ if( !this->allocation )
+ {
+ LOG_ERROR("Writing a pointer with no allocation");
+ }
+ this->allocation->write_ptr(ofs, ptr_ofs, ::std::move(reloc));
}
::std::ostream& operator<<(::std::ostream& os, const Value& v)
{
if( v.allocation )
{
- os << v.allocation.alloc();
+ os << *v.allocation;
}
else
{
@@ -687,15 +821,15 @@ extern ::std::ostream& operator<<(::std::ostream& os, const ValueRef& v)
{
if( v.m_size == 0 )
return os;
- if( v.m_alloc || v.m_value->allocation )
+ if( v.m_alloc )
{
- const auto& alloc_ptr = v.m_alloc ? v.m_alloc : v.m_value->allocation;
+ const auto& alloc_ptr = v.m_alloc;
// TODO: What if alloc_ptr isn't a data allocation?
switch(alloc_ptr.get_ty())
{
- case AllocationPtr::Ty::Allocation: {
+ case RelocationPtr::Ty::Allocation: {
const auto& alloc = alloc_ptr.alloc();
-
+
auto flags = os.flags();
os << ::std::hex;
for(size_t i = v.m_offset; i < ::std::min(alloc.size(), v.m_offset + v.m_size); i++)
@@ -724,10 +858,10 @@ extern ::std::ostream& operator<<(::std::ostream& os, const ValueRef& v)
}
os << " }";
} break;
- case AllocationPtr::Ty::Function:
+ case RelocationPtr::Ty::Function:
LOG_TODO("ValueRef to " << alloc_ptr);
break;
- case AllocationPtr::Ty::StdString: {
+ case RelocationPtr::Ty::StdString: {
const auto& s = alloc_ptr.str();
assert(v.m_offset < s.size());
assert(v.m_size < s.size());
@@ -740,12 +874,44 @@ extern ::std::ostream& operator<<(::std::ostream& os, const ValueRef& v)
}
os.setf(flags);
} break;
- case AllocationPtr::Ty::FfiPointer:
+ case RelocationPtr::Ty::FfiPointer:
LOG_TODO("ValueRef to " << alloc_ptr);
break;
}
}
- else
+ else if( v.m_value && v.m_value->allocation )
+ {
+ const auto& alloc = *v.m_value->allocation;
+
+ auto flags = os.flags();
+ os << ::std::hex;
+ for(size_t i = v.m_offset; i < ::std::min(alloc.size(), v.m_offset + v.m_size); i++)
+ {
+ if( i != 0 )
+ os << " ";
+
+ if( alloc.mask[i/8] & (1 << i%8) )
+ {
+ os << ::std::setw(2) << ::std::setfill('0') << (int)alloc.data_ptr()[i];
+ }
+ else
+ {
+ os << "--";
+ }
+ }
+ os.setf(flags);
+
+ os << " {";
+ for(const auto& r : alloc.relocations)
+ {
+ if( v.m_offset <= r.slot_ofs && r.slot_ofs < v.m_offset + v.m_size )
+ {
+ os << " @" << (r.slot_ofs - v.m_offset) << "=" << r.backing_alloc;
+ }
+ }
+ os << " }";
+ }
+ else if( v.m_value )
{
const auto& direct = v.m_value->direct_data;
@@ -766,24 +932,45 @@ extern ::std::ostream& operator<<(::std::ostream& os, const ValueRef& v)
}
os.setf(flags);
}
+ else
+ {
+ // TODO: no value?
+ }
return os;
}
-uint64_t ValueRef::read_usize(size_t ofs) const
+Value ValueRef::read_value(size_t ofs, size_t size) const
{
- uint64_t v = 0;
- this->read_bytes(ofs, &v, POINTER_SIZE);
- return v;
+ if( size == 0 )
+ return Value();
+ if( !(ofs < m_size && size <= m_size && ofs + size <= m_size) ) {
+ LOG_ERROR("Read exceeds bounds, " << ofs << " + " << size << " > " << m_size << " - from " << *this);
+ }
+ if( m_alloc ) {
+ switch(m_alloc.get_ty())
+ {
+ case RelocationPtr::Ty::Allocation:
+ return m_alloc.alloc().read_value(m_offset + ofs, size);
+ case RelocationPtr::Ty::StdString: {
+ auto rv = Value::with_size(size, false);
+ //ASSERT_BUG(ofs <= m_alloc.str().size(), "");
+ //ASSERT_BUG(size <= m_alloc.str().size(), "");
+ //ASSERT_BUG(ofs+size <= m_alloc.str().size(), "");
+ assert(m_offset+ofs <= m_alloc.str().size() && size <= m_alloc.str().size() && m_offset+ofs+size <= m_alloc.str().size());
+ rv.write_bytes(0, m_alloc.str().data() + m_offset + ofs, size);
+ return rv;
+ }
+ default:
+ //ASSERT_BUG(m_alloc.is_alloc(), "read_value on non-data backed Value - " << );
+ throw "TODO";
+ }
+ }
+ else {
+ return m_value->read_value(m_offset + ofs, size);
+ }
}
-uint64_t Value::read_usize(size_t ofs) const
+bool ValueRef::compare(const void* other, size_t other_len) const
{
- uint64_t v = 0;
- this->read_bytes(ofs, &v, POINTER_SIZE);
- return v;
+ check_bytes_valid(0, other_len);
+ return ::std::memcmp(data_ptr(), other, other_len) == 0;
}
-uint64_t Allocation::read_usize(size_t ofs) const
-{
- uint64_t v = 0;
- this->read_bytes(ofs, &v, POINTER_SIZE);
- return v;
-} \ No newline at end of file
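
A short sketch of the ownership semantics introduced here (illustrative only; the function name and literal values are stand-ins): AllocationHandle now gives Allocation shared-ownership behaviour, where copying a handle bumps `refcount` and the last handle's destructor deletes the allocation.

    // Not part of the patch - demonstrates the refcounted handle.
    #include <cassert>
    #include "value.hpp"

    static void allocation_handle_demo()
    {
        AllocationHandle a = Allocation::new_alloc(16);   // refcount == 1
        a->write_usize(0, 0x1234);
        {
            AllocationHandle b = a;                       // copy: refcount == 2
            assert(b->read_usize(0) == 0x1234);
        }                                                 // refcount back to 1
    }                                                     // allocation freed here
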
diff --git a/tools/standalone_miri/value.hpp b/tools/standalone_miri/value.hpp
index 8b103210..b057b3c4 100644
--- a/tools/standalone_miri/value.hpp
+++ b/tools/standalone_miri/value.hpp
@@ -1,11 +1,16 @@
-//
-//
-//
+/*
+ * mrustc Standalone MIRI
+ * - by John Hodge (Mutabah)
+ *
+ * value.hpp
+ * - Runtime values
+ */
#pragma once
#include <vector>
#include <memory>
#include <cstdint>
+#include <cstring> // memcpy
#include <cassert>
namespace HIR {
@@ -14,19 +19,55 @@ namespace HIR {
}
class Allocation;
struct Value;
+struct ValueRef;
struct FFIPointer
{
const char* source_function;
void* ptr_value;
+ size_t size;
};
-class AllocationPtr
+class AllocationHandle
{
friend class Allocation;
- void* m_ptr;
+ friend class RelocationPtr;
+ Allocation* m_ptr;
+
+private:
+ AllocationHandle(Allocation* p):
+ m_ptr(p)
+ {
+ }
public:
+ AllocationHandle(): m_ptr(nullptr) {}
+ AllocationHandle(AllocationHandle&& x): m_ptr(x.m_ptr) {
+ x.m_ptr = nullptr;
+ }
+ AllocationHandle(const AllocationHandle& x);
+ ~AllocationHandle();
+ AllocationHandle& operator=(const AllocationHandle& x) = delete;
+ AllocationHandle& operator=(AllocationHandle&& x) {
+ this->~AllocationHandle();
+ this->m_ptr = x.m_ptr;
+ x.m_ptr = nullptr;
+ return *this;
+ }
+
+ operator bool() const { return m_ptr != 0; }
+ const Allocation& operator*() const { assert(m_ptr); return *m_ptr; }
+ Allocation& operator*() { assert(m_ptr); return *m_ptr; }
+ const Allocation* operator->() const { assert(m_ptr); return m_ptr; }
+ Allocation* operator->() { assert(m_ptr); return m_ptr; }
+};
+
+// TODO: Split into RelocationPtr and AllocationHandle
+class RelocationPtr
+{
+ void* m_ptr;
+
+public:
enum class Ty
{
Allocation,
@@ -35,26 +76,20 @@ public:
FfiPointer,
};
-private:
- AllocationPtr(Allocation* p):
- m_ptr(p)
- {
- }
-public:
- AllocationPtr(): m_ptr(nullptr) {}
- AllocationPtr(AllocationPtr&& x): m_ptr(x.m_ptr) {
+ RelocationPtr(): m_ptr(nullptr) {}
+ RelocationPtr(RelocationPtr&& x): m_ptr(x.m_ptr) {
x.m_ptr = nullptr;
}
- AllocationPtr(const AllocationPtr& x);
- ~AllocationPtr();
- static AllocationPtr new_fcn(::HIR::Path p);
- //static AllocationPtr new_rawdata(const void* buf, size_t len);
- static AllocationPtr new_string(const ::std::string* s); // NOTE: The string must have a stable pointer
- static AllocationPtr new_ffi(FFIPointer info);
-
- AllocationPtr& operator=(const AllocationPtr& x) = delete;
- AllocationPtr& operator=(AllocationPtr&& x) {
- this->~AllocationPtr();
+ RelocationPtr(const RelocationPtr& x);
+ ~RelocationPtr();
+ static RelocationPtr new_alloc(AllocationHandle h);
+ static RelocationPtr new_fcn(::HIR::Path p);
+ static RelocationPtr new_string(const ::std::string* s); // NOTE: The string must have a stable pointer
+ static RelocationPtr new_ffi(FFIPointer info);
+
+ RelocationPtr& operator=(const RelocationPtr& x) = delete;
+ RelocationPtr& operator=(RelocationPtr&& x) {
+ this->~RelocationPtr();
this->m_ptr = x.m_ptr;
x.m_ptr = nullptr;
return *this;
@@ -96,7 +131,7 @@ public:
return static_cast<Ty>( reinterpret_cast<uintptr_t>(m_ptr) & 3 );
}
- friend ::std::ostream& operator<<(::std::ostream& os, const AllocationPtr& x);
+ friend ::std::ostream& operator<<(::std::ostream& os, const RelocationPtr& x);
private:
void* get_ptr() const {
return reinterpret_cast<void*>( reinterpret_cast<uintptr_t>(m_ptr) & ~3 );
@@ -107,15 +142,77 @@ struct Relocation
// Offset within parent allocation where this relocation is performed.
// TODO: Size?
size_t slot_ofs;
- AllocationPtr backing_alloc;
+ RelocationPtr backing_alloc;
+};
+
+// TODO: Split write and read
+struct ValueCommonRead
+{
+ virtual RelocationPtr get_relocation(size_t ofs) const = 0;
+ virtual void read_bytes(size_t ofs, void* dst, size_t count) const = 0;
+
+ uint8_t read_u8(size_t ofs) const { uint8_t rv; read_bytes(ofs, &rv, 1); return rv; }
+ uint16_t read_u16(size_t ofs) const { uint16_t rv; read_bytes(ofs, &rv, 2); return rv; }
+ uint32_t read_u32(size_t ofs) const { uint32_t rv; read_bytes(ofs, &rv, 4); return rv; }
+ uint64_t read_u64(size_t ofs) const { uint64_t rv; read_bytes(ofs, &rv, 8); return rv; }
+ int8_t read_i8(size_t ofs) const { return static_cast<int8_t>(read_u8(ofs)); }
+ int16_t read_i16(size_t ofs) const { return static_cast<int16_t>(read_u16(ofs)); }
+ int32_t read_i32(size_t ofs) const { return static_cast<int32_t>(read_u32(ofs)); }
+ int64_t read_i64(size_t ofs) const { return static_cast<int64_t>(read_u64(ofs)); }
+ float read_f32(size_t ofs) const { float rv; read_bytes(ofs, &rv, 4); return rv; }
+ double read_f64(size_t ofs) const { double rv; read_bytes(ofs, &rv, 8); return rv; }
+ uint64_t read_usize(size_t ofs) const;
+ int64_t read_isize(size_t ofs) const { return static_cast<int64_t>(read_usize(ofs)); }
+
+ /// Read a pointer from the value, requiring at least `req_valid` valid bytes; stores the available space in `size`
+ void* read_pointer_unsafe(size_t rd_ofs, size_t req_valid, size_t& size, bool& is_mut) const;
+ /// Read a pointer, requiring `req_len` valid bytes
+ const void* read_pointer_const(size_t rd_ofs, size_t req_len) const {
+ size_t tmp;
+ bool is_mut;
+ return read_pointer_unsafe(rd_ofs, req_len, tmp, is_mut);
+ }
+ /// Read a pointer, not requiring that the target be initialised
+ void* read_pointer_uninit(size_t rd_ofs, size_t& out_size) {
+ bool is_mut;
+ void* rv = read_pointer_unsafe(rd_ofs, 0, out_size, is_mut);
+ if(!is_mut)
+ throw "";
+ //LOG_FATAL("Attempting to get an uninit pointer to immutable data");
+ return rv;
+ }
+ /// Read a pointer and return a ValueRef to it (mutable data)
+ ValueRef read_pointer_valref_mut(size_t rd_ofs, size_t size);
};
-class Allocation
+struct ValueCommonWrite:
+ public ValueCommonRead
{
- friend class AllocationPtr;
+ virtual void write_bytes(size_t ofs, const void* src, size_t count) = 0;
+
+ void write_u8 (size_t ofs, uint8_t v) { write_bytes(ofs, &v, 1); }
+ void write_u16(size_t ofs, uint16_t v) { write_bytes(ofs, &v, 2); }
+ void write_u32(size_t ofs, uint32_t v) { write_bytes(ofs, &v, 4); }
+ void write_u64(size_t ofs, uint64_t v) { write_bytes(ofs, &v, 8); }
+ void write_i8 (size_t ofs, int8_t v) { write_u8 (ofs, static_cast<uint8_t >(v)); }
+ void write_i16(size_t ofs, int16_t v) { write_u16(ofs, static_cast<uint16_t>(v)); }
+ void write_i32(size_t ofs, int32_t v) { write_u32(ofs, static_cast<uint32_t>(v)); }
+ void write_i64(size_t ofs, int64_t v) { write_u64(ofs, static_cast<uint64_t>(v)); }
+ void write_f32(size_t ofs, float v) { write_bytes(ofs, &v, 4); }
+ void write_f64(size_t ofs, double v) { write_bytes(ofs, &v, 8); }
+ void write_usize(size_t ofs, uint64_t v);
+ void write_isize(size_t ofs, int64_t v) { write_usize(ofs, static_cast<uint64_t>(v)); }
+ virtual void write_ptr(size_t ofs, size_t ptr_ofs, RelocationPtr reloc) = 0;
+};
+
+class Allocation:
+ public ValueCommonWrite
+{
+ friend class AllocationHandle;
size_t refcount;
// TODO: Read-only flag?
+ bool is_freed = false;
public:
- static AllocationPtr new_alloc(size_t size);
+ static AllocationHandle new_alloc(size_t size);
const uint8_t* data_ptr() const { return reinterpret_cast<const uint8_t*>(this->data.data()); }
uint8_t* data_ptr() { return reinterpret_cast< uint8_t*>(this->data.data()); }
@@ -125,17 +222,19 @@ public:
::std::vector<uint8_t> mask;
::std::vector<Relocation> relocations;
- AllocationPtr get_relocation(size_t ofs) const {
+ RelocationPtr get_relocation(size_t ofs) const override {
for(const auto& r : relocations) {
if(r.slot_ofs == ofs)
return r.backing_alloc;
}
- return AllocationPtr();
+ return RelocationPtr();
+ }
+ void mark_as_freed() {
+ is_freed = true;
+ relocations.clear();
+ for(auto& v : mask)
+ v = 0;
}
- //void mark_as_freed() {
- // for(auto& v : mask)
- // v = 0;
- //}
void resize(size_t new_size);
@@ -143,127 +242,102 @@ public:
void mark_bytes_valid(size_t ofs, size_t size);
Value read_value(size_t ofs, size_t size) const;
- void read_bytes(size_t ofs, void* dst, size_t count) const;
+ void read_bytes(size_t ofs, void* dst, size_t count) const override;
void write_value(size_t ofs, Value v);
- void write_bytes(size_t ofs, const void* src, size_t count);
-
- // TODO: Make this block common
- void write_u8 (size_t ofs, uint8_t v) { write_bytes(ofs, &v, 1); }
- void write_u16(size_t ofs, uint16_t v) { write_bytes(ofs, &v, 2); }
- void write_u32(size_t ofs, uint32_t v) { write_bytes(ofs, &v, 4); }
- void write_u64(size_t ofs, uint64_t v) { write_bytes(ofs, &v, 8); }
- void write_i8 (size_t ofs, int8_t v) { write_u8 (ofs, static_cast<uint8_t >(v)); }
- void write_i16(size_t ofs, int16_t v) { write_u16(ofs, static_cast<uint16_t>(v)); }
- void write_i32(size_t ofs, int32_t v) { write_u32(ofs, static_cast<uint32_t>(v)); }
- void write_i64(size_t ofs, int64_t v) { write_u64(ofs, static_cast<uint64_t>(v)); }
- void write_f32(size_t ofs, float v) { write_bytes(ofs, &v, 4); }
- void write_f64(size_t ofs, double v) { write_bytes(ofs, &v, 8); }
- void write_usize(size_t ofs, uint64_t v);
- void write_isize(size_t ofs, int64_t v) { write_usize(ofs, static_cast<uint64_t>(v)); }
-
- uint8_t read_u8(size_t ofs) const { uint8_t rv; read_bytes(ofs, &rv, 1); return rv; }
- uint16_t read_u16(size_t ofs) const { uint16_t rv; read_bytes(ofs, &rv, 2); return rv; }
- uint32_t read_u32(size_t ofs) const { uint32_t rv; read_bytes(ofs, &rv, 4); return rv; }
- uint64_t read_u64(size_t ofs) const { uint64_t rv; read_bytes(ofs, &rv, 8); return rv; }
- int8_t read_i8(size_t ofs) const { return static_cast<int8_t>(read_u8(ofs)); }
- int16_t read_i16(size_t ofs) const { return static_cast<int16_t>(read_u16(ofs)); }
- int32_t read_i32(size_t ofs) const { return static_cast<int32_t>(read_u32(ofs)); }
- int64_t read_i64(size_t ofs) const { return static_cast<int64_t>(read_u64(ofs)); }
- float read_f32(size_t ofs) const { float rv; read_bytes(ofs, &rv, 4); return rv; }
- double read_f64(size_t ofs) const { double rv; read_bytes(ofs, &rv, 8); return rv; }
- uint64_t read_usize(size_t ofs) const;
- int64_t read_isize(size_t ofs) const { return static_cast<int64_t>(read_usize(ofs)); }
+ void write_bytes(size_t ofs, const void* src, size_t count) override;
+ void write_ptr(size_t ofs, size_t ptr_ofs, RelocationPtr reloc) override;
};
extern ::std::ostream& operator<<(::std::ostream& os, const Allocation& x);
-struct Value
+struct Value:
+ public ValueCommonWrite
{
// If NULL, data is direct
- AllocationPtr allocation;
+ AllocationHandle allocation;
struct {
- uint8_t data[2*sizeof(size_t)-3]; // 16-3 = 13, fits in 16 bits of mask
+ // NOTE: Can't pack the mask+size tighter, need 4 bits of size (8-15) leaving 12 bits of mask
+ uint8_t data[2*8-3]; // 13 data bytes, plus 16-bit mask, plus size byte = 16 bytes
uint8_t mask[2];
uint8_t size;
} direct_data;
Value();
Value(::HIR::TypeRef ty);
+
static Value with_size(size_t size, bool have_allocation);
static Value new_fnptr(const ::HIR::Path& fn_path);
static Value new_ffiptr(FFIPointer ffi);
+ static Value new_pointer(::HIR::TypeRef ty, uint64_t v, RelocationPtr r);
+ static Value new_usize(uint64_t v);
+ static Value new_isize(int64_t v);
+ static Value new_u32(uint32_t v);
+ static Value new_i32(int32_t v);
void create_allocation();
- size_t size() const { return allocation ? allocation.alloc().size() : direct_data.size; }
+ size_t size() const { return allocation ? allocation->size() : direct_data.size; }
+ const uint8_t* data_ptr() const { return allocation ? allocation->data_ptr() : direct_data.data; }
+ uint8_t* data_ptr() { return allocation ? allocation->data_ptr() : direct_data.data; }
+
+ RelocationPtr get_relocation(size_t ofs) const override {
+ if( this->allocation )
+ return this->allocation->get_relocation(ofs);
+ else
+ return RelocationPtr();
+ }
void check_bytes_valid(size_t ofs, size_t size) const;
void mark_bytes_valid(size_t ofs, size_t size);
Value read_value(size_t ofs, size_t size) const;
- void read_bytes(size_t ofs, void* dst, size_t count) const;
+ void read_bytes(size_t ofs, void* dst, size_t count) const override;
void write_value(size_t ofs, Value v);
- void write_bytes(size_t ofs, const void* src, size_t count);
+ void write_bytes(size_t ofs, const void* src, size_t count) override;
- // TODO: Make this block common
- void write_u8 (size_t ofs, uint8_t v) { write_bytes(ofs, &v, 1); }
- void write_u16(size_t ofs, uint16_t v) { write_bytes(ofs, &v, 2); }
- void write_u32(size_t ofs, uint32_t v) { write_bytes(ofs, &v, 4); }
- void write_u64(size_t ofs, uint64_t v) { write_bytes(ofs, &v, 8); }
- void write_i8 (size_t ofs, int8_t v) { write_u8 (ofs, static_cast<uint8_t >(v)); }
- void write_i16(size_t ofs, int16_t v) { write_u16(ofs, static_cast<uint16_t>(v)); }
- void write_i32(size_t ofs, int32_t v) { write_u32(ofs, static_cast<uint32_t>(v)); }
- void write_i64(size_t ofs, int64_t v) { write_u64(ofs, static_cast<uint64_t>(v)); }
- void write_f32(size_t ofs, float v) { write_bytes(ofs, &v, 4); }
- void write_f64(size_t ofs, double v) { write_bytes(ofs, &v, 8); }
- void write_usize(size_t ofs, uint64_t v);
- void write_isize(size_t ofs, int64_t v) { write_usize(ofs, static_cast<uint64_t>(v)); }
-
- uint8_t read_u8(size_t ofs) const { uint8_t rv; read_bytes(ofs, &rv, 1); return rv; }
- uint16_t read_u16(size_t ofs) const { uint16_t rv; read_bytes(ofs, &rv, 2); return rv; }
- uint32_t read_u32(size_t ofs) const { uint32_t rv; read_bytes(ofs, &rv, 4); return rv; }
- uint64_t read_u64(size_t ofs) const { uint64_t rv; read_bytes(ofs, &rv, 8); return rv; }
- int8_t read_i8(size_t ofs) const { return static_cast<int8_t>(read_u8(ofs)); }
- int16_t read_i16(size_t ofs) const { return static_cast<int16_t>(read_u16(ofs)); }
- int32_t read_i32(size_t ofs) const { return static_cast<int32_t>(read_u32(ofs)); }
- int64_t read_i64(size_t ofs) const { return static_cast<int64_t>(read_u64(ofs)); }
- float read_f32(size_t ofs) const { float rv; read_bytes(ofs, &rv, 4); return rv; }
- double read_f64(size_t ofs) const { double rv; read_bytes(ofs, &rv, 8); return rv; }
- uint64_t read_usize(size_t ofs) const;
- int64_t read_isize(size_t ofs) const { return static_cast<int64_t>(read_usize(ofs)); }
+ void write_ptr(size_t ofs, size_t ptr_ofs, RelocationPtr reloc) override;
};
extern ::std::ostream& operator<<(::std::ostream& os, const Value& v);
// A read-only reference to a value (to write, you have to go through it)
-struct ValueRef
+struct ValueRef:
+ public ValueCommonRead
{
- // Either an AllocationPtr, or a Value pointer
- AllocationPtr m_alloc;
+ // Either an AllocationHandle, or a Value pointer
+ RelocationPtr m_alloc;
Value* m_value;
size_t m_offset; // Offset within the value
size_t m_size; // Size in bytes of the referenced value
::std::shared_ptr<Value> m_metadata;
- ValueRef(AllocationPtr ptr, size_t ofs, size_t size):
+ ValueRef(RelocationPtr ptr, size_t ofs, size_t size):
m_alloc(ptr),
m_value(nullptr),
m_offset(ofs),
m_size(size)
{
- switch(m_alloc.get_ty())
+ if( m_alloc )
{
- case AllocationPtr::Ty::Allocation:
- assert(ofs < m_alloc.alloc().size());
- assert(size <= m_alloc.alloc().size());
- assert(ofs+size <= m_alloc.alloc().size());
- break;
- case AllocationPtr::Ty::StdString:
- assert(ofs < m_alloc.str().size());
- assert(size <= m_alloc.str().size());
- assert(ofs+size <= m_alloc.str().size());
- break;
- default:
- throw "TODO";
+ switch(m_alloc.get_ty())
+ {
+ case RelocationPtr::Ty::Allocation:
+ assert(ofs < m_alloc.alloc().size());
+ assert(size <= m_alloc.alloc().size());
+ assert(ofs+size <= m_alloc.alloc().size());
+ break;
+ case RelocationPtr::Ty::StdString:
+ assert(ofs < m_alloc.str().size());
+ assert(size <= m_alloc.str().size());
+ assert(ofs+size <= m_alloc.str().size());
+ break;
+ case RelocationPtr::Ty::FfiPointer:
+ assert(ofs < m_alloc.ffi().size);
+ assert(size <= m_alloc.ffi().size);
+ assert(ofs+size <= m_alloc.ffi().size);
+ break;
+ default:
+ throw "TODO";
+ }
}
}
ValueRef(Value& val):
@@ -277,56 +351,70 @@ struct ValueRef
{
}
- AllocationPtr get_relocation(size_t ofs) const {
+ RelocationPtr get_relocation(size_t ofs) const override {
if(m_alloc)
{
if( m_alloc.is_alloc() )
return m_alloc.alloc().get_relocation(ofs);
else
- return AllocationPtr();
+ return RelocationPtr();
}
- else if( m_value->allocation )
+ else if( m_value )
{
- if( m_value->allocation.is_alloc() )
- return m_value->allocation.alloc().get_relocation(ofs);
- else
- return AllocationPtr();
+ return m_value->get_relocation(ofs);
}
else
{
- return AllocationPtr();
+ return RelocationPtr();
+ }
+ }
+ Value read_value(size_t ofs, size_t size) const;
+ const uint8_t* data_ptr() const {
+ if( m_alloc ) {
+ switch(m_alloc.get_ty())
+ {
+ case RelocationPtr::Ty::Allocation:
+ return m_alloc.alloc().data_ptr() + m_offset;
+ break;
+ case RelocationPtr::Ty::StdString:
+ return reinterpret_cast<const uint8_t*>(m_alloc.str().data() + m_offset);
+ default:
+ throw "TODO";
+ }
+ }
+ else if( m_value ) {
+ return m_value->data_ptr() + m_offset;
+ }
+ else {
+ return nullptr;
}
}
- Value read_value(size_t ofs, size_t size) const {
+ void read_bytes(size_t ofs, void* dst, size_t size) const {
if( size == 0 )
- return Value();
+ return ;
assert(ofs < m_size);
assert(size <= m_size);
assert(ofs+size <= m_size);
if( m_alloc ) {
switch(m_alloc.get_ty())
{
- case AllocationPtr::Ty::Allocation:
- return m_alloc.alloc().read_value(m_offset + ofs, size);
- case AllocationPtr::Ty::StdString: {
- auto rv = Value::with_size(size, false);
- //ASSERT_BUG(ofs <= m_alloc.str().size(), "");
- //ASSERT_BUG(size <= m_alloc.str().size(), "");
- //ASSERT_BUG(ofs+size <= m_alloc.str().size(), "");
+ case RelocationPtr::Ty::Allocation:
+ m_alloc.alloc().read_bytes(m_offset + ofs, dst, size);
+ break;
+ case RelocationPtr::Ty::StdString:
assert(m_offset+ofs <= m_alloc.str().size() && size <= m_alloc.str().size() && m_offset+ofs+size <= m_alloc.str().size());
- rv.write_bytes(0, m_alloc.str().data() + m_offset + ofs, size);
- return rv;
- }
+ ::std::memcpy(dst, m_alloc.str().data() + m_offset + ofs, size);
+ break;
default:
//ASSERT_BUG(m_alloc.is_alloc(), "read_value on non-data backed Value - " << );
throw "TODO";
}
}
else {
- return m_value->read_value(m_offset + ofs, size);
+ m_value->read_bytes(m_offset + ofs, dst, size);
}
}
- void read_bytes(size_t ofs, void* dst, size_t size) const {
+ void check_bytes_valid(size_t ofs, size_t size) const {
if( size == 0 )
return ;
assert(ofs < m_size);
@@ -335,12 +423,11 @@ struct ValueRef
if( m_alloc ) {
switch(m_alloc.get_ty())
{
- case AllocationPtr::Ty::Allocation:
- m_alloc.alloc().read_bytes(m_offset + ofs, dst, size);
+ case RelocationPtr::Ty::Allocation:
+ m_alloc.alloc().check_bytes_valid(m_offset + ofs, size);
break;
- case AllocationPtr::Ty::StdString:
+ case RelocationPtr::Ty::StdString:
assert(m_offset+ofs <= m_alloc.str().size() && size <= m_alloc.str().size() && m_offset+ofs+size <= m_alloc.str().size());
- ::std::memcpy(dst, m_alloc.str().data() + m_offset + ofs, size);
break;
default:
//ASSERT_BUG(m_alloc.is_alloc(), "read_value on non-data backed Value - " << );
@@ -348,20 +435,10 @@ struct ValueRef
}
}
else {
- m_value->read_bytes(m_offset + ofs, dst, size);
+ m_value->check_bytes_valid(m_offset + ofs, size);
}
}
- uint8_t read_u8(size_t ofs) const { uint8_t rv; read_bytes(ofs, &rv, 1); return rv; }
- uint16_t read_u16(size_t ofs) const { uint16_t rv; read_bytes(ofs, &rv, 2); return rv; }
- uint32_t read_u32(size_t ofs) const { uint32_t rv; read_bytes(ofs, &rv, 4); return rv; }
- uint64_t read_u64(size_t ofs) const { uint64_t rv; read_bytes(ofs, &rv, 8); return rv; }
- int8_t read_i8(size_t ofs) const { return static_cast<int8_t>(read_u8(ofs)); }
- int16_t read_i16(size_t ofs) const { return static_cast<int16_t>(read_u16(ofs)); }
- int32_t read_i32(size_t ofs) const { return static_cast<int32_t>(read_u32(ofs)); }
- int64_t read_i64(size_t ofs) const { return static_cast<int64_t>(read_u64(ofs)); }
- float read_f32(size_t ofs) const { float rv; read_bytes(ofs, &rv, 4); return rv; }
- double read_f64(size_t ofs) const { double rv; read_bytes(ofs, &rv, 8); return rv; }
- uint64_t read_usize(size_t ofs) const;
- int64_t read_isize(size_t ofs) const { return static_cast<int64_t>(read_usize(ofs)); }
+
+ bool compare(const void* other, size_t other_len) const;
};
extern ::std::ostream& operator<<(::std::ostream& os, const ValueRef& v);
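
A minimal usage sketch of the reworked ValueRef interface (illustrative only, not
part of the commit): byte-level reads now go through check_bytes_valid()/read_bytes()
rather than the removed read_u8()/read_u16()/... helpers. It assumes value.hpp from
tools/standalone_miri is on the include path, and reuses Value::with_size()/write_bytes()
as they appear elsewhere in this diff; the second argument to with_size() simply mirrors
the call that was removed above.

#include "value.hpp"    // tools/standalone_miri/value.hpp (this tree)
#include <cstddef>
#include <cstdint>

// Read a host-endian u32 out of a ValueRef, mirroring the removed
// ValueRef::read_u32() helper.
static uint32_t read_u32(const ValueRef& vr, size_t ofs)
{
    vr.check_bytes_valid(ofs, 4);       // fault early on uninitialised bytes
    uint32_t rv = 0;
    vr.read_bytes(ofs, &rv, sizeof rv); // offset/size are asserted in-bounds
    return rv;
}

int main()
{
    uint32_t raw = 0x12345678;
    Value v = Value::with_size(sizeof raw, false);
    v.write_bytes(0, &raw, sizeof raw);
    ValueRef vr(v);
    return read_u32(vr, 0) == raw ? 0 : 1;
}
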
diff --git a/tools/testrunner/main.cpp b/tools/testrunner/main.cpp
index 63391fc5..f21dc7fa 100644
--- a/tools/testrunner/main.cpp
+++ b/tools/testrunner/main.cpp
@@ -93,6 +93,11 @@ bool run_compiler(const ::helpers::path& source_file, const ::helpers::path& out
{
::std::vector<const char*> args;
args.push_back("mrustc");
+
+    // Force optimised and debuggable builds
+ args.push_back("-O");
+ args.push_back("-g");
+
args.push_back("-L");
args.push_back("output");
if(libdir.is_valid())
@@ -336,6 +341,12 @@ int main(int argc, const char* argv[])
continue;
}
+    // If there are no pre-build files (dependencies), clear the dependency path (cleaner output)
+ if( test.m_pre_build.empty() )
+ {
+ depdir = ::helpers::path();
+ }
+
auto compile_logfile = outdir / test.m_name + "-build.log";
if( !run_compiler(test.m_path, outfile, test.m_extra_flags, depdir) )
{
@@ -355,12 +366,27 @@ int main(int argc, const char* argv[])
if( !run_executable(outfile, { outfile.str().c_str() }, run_out_file) )
{
DEBUG("RUN FAIL " << test.m_name);
+
+ // Move the failing output file
+ auto fail_file = run_out_file + "_failed";
+ remove(fail_file.str().c_str());
+ rename(run_out_file.str().c_str(), fail_file.str().c_str());
+ DEBUG("- Output in " << fail_file);
+
n_fail ++;
if( opts.fail_fast )
return 1;
else
continue;
}
+
+#ifdef __linux__
+ // Run `strip` on the test (if on linux)
+    // XXX: Make this cleaner, or remove the need for it (by dynamically linking libstd)
+ if( !run_executable("/usr/bin/strip", { "strip", outfile.str().c_str() }, "/dev/null") )
+ {
+ }
+#endif
}
else
{
@@ -528,10 +554,11 @@ bool run_executable(const ::helpers::path& exe_name, const ::std::vector<const c
auto argv = args;
argv.push_back(nullptr);
pid_t pid;
+ extern char** environ;
int rv = posix_spawn(&pid, exe_name.str().c_str(), &file_actions, nullptr, const_cast<char**>(argv.data()), environ);
if( rv != 0 )
{
- DEBUG("Error in posix_spawn - " << rv);
+ DEBUG("Error in posix_spawn of " << exe_name << " - " << rv);
return false;
}
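
For reference, the `extern char** environ;` declaration added above is needed because
POSIX does not require any system header to declare `environ`; the caller declares it
itself before handing the current environment to posix_spawn. A small self-contained
sketch of the same pattern (illustrative only, not taken from the testrunner; it assumes
/bin/echo exists on the host):

#include <spawn.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <cstdio>

extern char** environ;  // POSIX: declared by the application, not by a header

int main()
{
    const char* argv[] = { "echo", "hello", nullptr };
    pid_t pid;
    // Spawn the child with the parent's environment
    int rv = posix_spawn(&pid, "/bin/echo", nullptr, nullptr,
                         const_cast<char**>(argv), environ);
    if( rv != 0 )
    {
        std::fprintf(stderr, "posix_spawn failed: %d\n", rv);
        return 1;
    }
    int status = 0;
    waitpid(pid, &status, 0);
    return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}
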