Commit
Merge pull request #1010 from AntelopeIO/GH-984-whitelist-eos-vm-oc-limits

Remove subjective eos-vm-oc limits for whitelisted accounts
heifner authored Nov 8, 2024
2 parents d40dcf9 + d429989 commit 854c770
Showing 12 changed files with 169 additions and 116 deletions.
6 changes: 5 additions & 1 deletion libraries/chain/apply_context.cpp
@@ -1090,6 +1090,10 @@ action_name apply_context::get_sender() const {
return action_name();
}

bool apply_context::is_eos_vm_oc_whitelisted() const {
return receiver.prefix() == config::system_account_name; // "eosio"_n
}

// Context | OC?
//-------------------------------------------------------------------------------
// Building block | baseline, OC for eosio.*
@@ -1099,7 +1103,7 @@ action_name apply_context::get_sender() const {
// Compute trx | baseline, OC for eosio.*
// Read only trx | OC
bool apply_context::should_use_eos_vm_oc()const {
return receiver.prefix() == config::system_account_name // "eosio"_n, all cases use OC
return is_eos_vm_oc_whitelisted() // all whitelisted accounts use OC always
|| (is_applying_block() && !control.is_producer_node()) // validating/applying block
|| trx_context.is_read_only();
}
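
For context, a minimal sketch (not part of the commit) of which receivers pass the new whitelist test, assuming Antelope's name::prefix() returns the account name up to its final dot, so that "eosio" itself and "eosio."-prefixed accounts share the "eosio" prefix:

// Illustrative only; header paths and name/prefix semantics are assumptions, not part of this diff.
#include <eosio/chain/name.hpp>
#include <eosio/chain/config.hpp>
#include <cassert>

int main() {
   using namespace eosio::chain;
   assert( "eosio"_n.prefix()       == config::system_account_name ); // whitelisted, subjective OC limits skipped
   assert( "eosio.token"_n.prefix() == config::system_account_name ); // whitelisted, subjective OC limits skipped
   assert( "alice"_n.prefix()       != config::system_account_name ); // not whitelisted, default limits still apply
}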
1 change: 1 addition & 0 deletions libraries/chain/include/eosio/chain/apply_context.hpp
@@ -601,6 +601,7 @@ class apply_context {
action_name get_sender() const;

bool is_applying_block() const { return trx_context.explicit_billed_cpu_time; }
bool is_eos_vm_oc_whitelisted() const;
bool should_use_eos_vm_oc()const;

/// Fields:
libraries/chain/…/eos-vm-oc/code_cache.hpp
@@ -9,6 +9,7 @@
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/key_extractors.hpp>
#include <boost/multi_index/member.hpp>

#include <boost/interprocess/mem_algo/rbtree_best_fit.hpp>
#include <boost/asio/local/datagram_protocol.hpp>
@@ -24,6 +25,17 @@ namespace std {
};
}

namespace boost {
template<> struct hash<eosio::chain::eosvmoc::code_tuple> {
size_t operator()(const eosio::chain::eosvmoc::code_tuple& ct) const {
std::size_t seed = 0;
boost::hash_combine(seed, ct.code_id._hash[0]);
boost::hash_combine(seed, ct.vm_version);
return seed;
}
};
}
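
This boost::hash specialization is what the reworked queued-compiles container further down relies on: Boost.MultiIndex hashed indices default to boost::hash of the key type, and the index is now keyed directly on the code_tuple member rather than a composite key. A small sketch of the hasher in isolation, assuming a code_id digest and vm_version are in scope:

// Illustrative only; shows the default hasher picked up by hashed_unique below.
boost::hash<eosio::chain::eosvmoc::code_tuple> hasher;
std::size_t h = hasher(eosio::chain::eosvmoc::code_tuple{code_id, vm_version});
// Lookups elsewhere in this commit pass the same key type directly, e.g.
//   _queued_compiles.get<by_hash>().find(code_tuple{code_id, vm_version});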

namespace eosio { namespace chain { namespace eosvmoc {

using namespace boost::multi_index;
@@ -45,6 +57,13 @@ class code_cache_base {

void free_code(const digest_type& code_id, const uint8_t& vm_version);

// mode for get_descriptor_for_code calls
struct mode {
bool whitelisted = false;
bool high_priority = false;
bool write_window = true;
};

// get_descriptor_for_code failure reasons
enum class get_cd_failure {
temporary, // oc compile not done yet, users like read-only trxs can retry
@@ -80,15 +99,11 @@ class code_cache_base {

//these are really only useful to the async code cache, but keep them here so free_code can be shared
using queued_compilies_t = boost::multi_index_container<
code_tuple,
compile_wasm_message,
indexed_by<
sequenced<>,
hashed_unique<tag<by_hash>,
composite_key< code_tuple,
member<code_tuple, digest_type, &code_tuple::code_id>,
member<code_tuple, uint8_t, &code_tuple::vm_version>
>
>
member<compile_wasm_message, code_tuple, &compile_wasm_message::code>>
>
>;
queued_compilies_t _queued_compiles;
@@ -112,7 +127,7 @@ class code_cache_async : public code_cache_base {
//If code is in cache: returns pointer & bumps to front of MRU list
//If code is not in cache, and not blacklisted, and not currently compiling: return nullptr and kick off compile
//otherwise: return nullptr
const code_descriptor* const get_descriptor_for_code(bool high_priority, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure);
const code_descriptor* const get_descriptor_for_code(mode m, const digest_type& code_id, const uint8_t& vm_version, get_cd_failure& failure);

private:
std::thread _monitor_reply_thread;
@@ -129,7 +144,7 @@ class code_cache_sync : public code_cache_base {
~code_cache_sync();

//Can still fail and return nullptr if, for example, there is an expected instantiation failure
const code_descriptor* const get_descriptor_for_code_sync(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window);
const code_descriptor* const get_descriptor_for_code_sync(mode m, const digest_type& code_id, const uint8_t& vm_version);
};

}}}
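
The new mode struct replaces the loose high_priority and is_write_window booleans at both call sites. A short sketch of how the asynchronous path fills it; this simply mirrors the wasm_interface.cpp hunk later in this commit, with context being the apply_context& of the running action:

// Mirrors the caller change in libraries/chain/wasm_interface.cpp below; not new behavior.
eosio::chain::eosvmoc::code_cache_async::mode m;
m.whitelisted   = context.is_eos_vm_oc_whitelisted();            // receiver carries the "eosio" prefix
m.high_priority = m.whitelisted && context.is_applying_block();   // compile whitelisted contracts first when applying blocks
m.write_window  = context.control.is_write_window();              // cache entries may only be updated in the write window
cd = my->eosvmoc->cc.get_descriptor_for_code(m, code_hash, vm_version, failure);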
libraries/chain/…/eos-vm-oc/config.hpp
@@ -1,12 +1,10 @@
#pragma once

#include <istream>
#include <ostream>
#include <vector>
#include <string>

#include <fc/io/raw.hpp>

#include <string>
#include <optional>

#include <sys/resource.h>

#ifndef __has_feature
@@ -15,12 +13,9 @@

namespace eosio { namespace chain { namespace eosvmoc {

struct config {
uint64_t cache_size = 1024u*1024u*1024u;
uint64_t threads = 1u;

struct subjective_compile_limits {
// subjective limits for OC compilation.
// nodeos enforces the limits by the default values.
// nodeos enforces the limits by the default values unless account is whitelisted.
// libtester disables the limits in all tests, except enforces the limits
// in the tests in unittests/eosvmoc_limits_tests.cpp.
std::optional<rlim_t> cpu_limit {20u};
@@ -33,34 +28,37 @@ struct config {
std::optional<size_t> generated_code_size_limit {16u*1024u*1024u};
};

struct config {
uint64_t cache_size = 1024u*1024u*1024u;
uint64_t threads = 1u;
subjective_compile_limits non_whitelisted_limits;
};

//work around unexpected std::optional behavior
template <typename DS>
inline DS& operator>>(DS& ds, eosio::chain::eosvmoc::config& cfg) {
fc::raw::pack(ds, cfg.cache_size);
fc::raw::pack(ds, cfg.threads);

auto better_optional_unpack = [&]<typename T>(std::optional<T>& t) {
inline DS& operator>>(DS& ds, eosio::chain::eosvmoc::subjective_compile_limits& cl) {
auto optional_unpack_with_reset = [&]<typename T>(std::optional<T>& t) {
bool b; fc::raw::unpack( ds, b );
if(b) { t = T(); fc::raw::unpack( ds, *t ); }
else { t.reset(); }
};
better_optional_unpack(cfg.cpu_limit);
better_optional_unpack(cfg.vm_limit);
better_optional_unpack(cfg.stack_size_limit);
better_optional_unpack(cfg.generated_code_size_limit);
optional_unpack_with_reset(cl.cpu_limit);
optional_unpack_with_reset(cl.vm_limit);
optional_unpack_with_reset(cl.stack_size_limit);
optional_unpack_with_reset(cl.generated_code_size_limit);

return ds;
}

template <typename DS>
inline DS& operator<<(DS& ds, const eosio::chain::eosvmoc::config& cfg) {
fc::raw::pack(ds, cfg.cache_size);
fc::raw::pack(ds, cfg.threads);
fc::raw::pack(ds, cfg.cpu_limit);
fc::raw::pack(ds, cfg.vm_limit);
fc::raw::pack(ds, cfg.stack_size_limit);
fc::raw::pack(ds, cfg.generated_code_size_limit);
inline DS& operator<<(DS& ds, const eosio::chain::eosvmoc::subjective_compile_limits& cl) {
fc::raw::pack(ds, cl.cpu_limit);
fc::raw::pack(ds, cl.vm_limit);
fc::raw::pack(ds, cl.stack_size_limit);
fc::raw::pack(ds, cl.generated_code_size_limit);
return ds;
}

}}}

FC_REFLECT(eosio::chain::eosvmoc::config, (cache_size)(threads)(non_whitelisted_limits))
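
Net effect of this header change: the four subjective limits move out of eosvmoc::config into their own subjective_compile_limits struct, and config keeps them only as non_whitelisted_limits; the serialization operators now cover the limits struct on its own. A brief sketch of how the limits are selected per compile; variable names here are illustrative and the real selection happens in code_cache.cpp later in this commit:

// Illustrative only; see code_cache_async/code_cache_sync in code_cache.cpp for the real call sites.
eosio::chain::eosvmoc::config cfg;            // cache_size, threads, non_whitelisted_limits (defaults as above)
bool whitelisted = false;                     // true when the receiver passes is_eos_vm_oc_whitelisted()
std::optional<eosio::chain::eosvmoc::subjective_compile_limits> limits =
      whitelisted ? std::optional<eosio::chain::eosvmoc::subjective_compile_limits>{}   // whitelisted: no subjective limits enforced
                  : std::optional{cfg.non_whitelisted_limits};                          // everyone else: default limits enforced
// 'limits' is what travels to the compile monitor inside compile_wasm_message (next file).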
libraries/chain/…/eos-vm-oc/ipc_protocol.hpp
@@ -22,7 +22,7 @@ struct code_tuple {

struct compile_wasm_message {
code_tuple code;
eosvmoc::config eosvmoc_config;
std::optional<eosvmoc::subjective_compile_limits> limits;
//Two sent fd: 1) communication socket for result, 2) the wasm to compile
};

@@ -63,7 +63,7 @@ using eosvmoc_message = std::variant<initialize_message,
FC_REFLECT(eosio::chain::eosvmoc::initialize_message, )
FC_REFLECT(eosio::chain::eosvmoc::initalize_response_message, (error_message))
FC_REFLECT(eosio::chain::eosvmoc::code_tuple, (code_id)(vm_version))
FC_REFLECT(eosio::chain::eosvmoc::compile_wasm_message, (code)(eosvmoc_config))
FC_REFLECT(eosio::chain::eosvmoc::compile_wasm_message, (code)(limits))
FC_REFLECT(eosio::chain::eosvmoc::evict_wasms_message, (codes))
FC_REFLECT(eosio::chain::eosvmoc::code_compilation_result_message, (start)(apply_offset)(starting_memory_pages)(initdata_prologue_size))
FC_REFLECT(eosio::chain::eosvmoc::compilation_result_unknownfailure, )
11 changes: 9 additions & 2 deletions libraries/chain/wasm_interface.cpp
@@ -92,8 +92,15 @@ namespace eosio { namespace chain {
const chain::eosvmoc::code_descriptor* cd = nullptr;
chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary;
try {
const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name;
cd = my->eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure);
// Ideally all validator nodes would switch to using oc before block producer nodes so that validators
// are never overwhelmed. Compile whitelisted account contracts first on non-produced blocks. This makes
// it more likely that validators will switch to the oc compiled contract before the block producer runs
// an action for the contract with oc.
chain::eosvmoc::code_cache_async::mode m;
m.whitelisted = context.is_eos_vm_oc_whitelisted();
m.high_priority = m.whitelisted && context.is_applying_block();
m.write_window = context.control.is_write_window();
cd = my->eosvmoc->cc.get_descriptor_for_code(m, code_hash, vm_version, failure);
if (test_disable_tierup)
cd = nullptr;
} catch (...) {
5 changes: 4 additions & 1 deletion libraries/chain/webassembly/runtimes/eos-vm-oc.cpp
@@ -28,7 +28,10 @@ class eosvmoc_instantiated_module : public wasm_instantiated_module_interface {
bool is_main_thread() { return _main_thread_id == std::this_thread::get_id(); };

void apply(apply_context& context) override {
const code_descriptor* const cd = _eosvmoc_runtime.cc.get_descriptor_for_code_sync(_code_hash, _vm_version, context.control.is_write_window());
eosio::chain::eosvmoc::code_cache_sync::mode m;
m.whitelisted = context.is_eos_vm_oc_whitelisted();
m.write_window = context.control.is_write_window();
const code_descriptor* const cd = _eosvmoc_runtime.cc.get_descriptor_for_code_sync(m, _code_hash, _vm_version);
EOS_ASSERT(cd, wasm_execution_error, "EOS VM OC instantiation failed");

if ( is_main_thread() )
45 changes: 26 additions & 19 deletions libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp
@@ -106,11 +106,11 @@ std::tuple<size_t, size_t> code_cache_async::consume_compile_thread_queue() {
}


const code_descriptor* const code_cache_async::get_descriptor_for_code(bool high_priority, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) {
const code_descriptor* const code_cache_async::get_descriptor_for_code(mode m, const digest_type& code_id, const uint8_t& vm_version, get_cd_failure& failure) {
//if there are any outstanding compiles, process the result queue now
//When app is in write window, all tasks are running sequentially and read-only threads
//are not running. Safe to update cache entries.
if(is_write_window && _outstanding_compiles_and_poison.size()) {
if(m.write_window && _outstanding_compiles_and_poison.size()) {
auto [count_processed, bytes_remaining] = consume_compile_thread_queue();

if(count_processed)
@@ -121,12 +121,12 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(bool high

//it's not clear this check is required: if apply() was called for code then it existed in the code_index; and then
// if we got notification of it no longer existing we would have removed it from queued_compiles
const code_object* const codeobject = _db.find<code_object,by_code_hash>(boost::make_tuple(nextup->code_id, 0, nextup->vm_version));
const code_object* const codeobject = _db.find<code_object,by_code_hash>(boost::make_tuple(nextup->code.code_id, 0, nextup->code.vm_version));
if(codeobject) {
_outstanding_compiles_and_poison.emplace(*nextup, false);
_outstanding_compiles_and_poison.emplace(nextup->code, false);
std::vector<wrapped_fd> fds_to_pass;
fds_to_pass.emplace_back(memfd_for_bytearray(codeobject->code));
FC_ASSERT(write_message_with_fds(_compile_monitor_write_socket, compile_wasm_message{ *nextup, _eosvmoc_config }, fds_to_pass), "EOS VM failed to communicate to OOP manager");
FC_ASSERT(write_message_with_fds(_compile_monitor_write_socket, *nextup, fds_to_pass), "EOS VM failed to communicate to OOP manager");
--count_processed;
}
_queued_compiles.erase(nextup);
@@ -136,36 +136,41 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(bool high
//check for entry in cache
code_cache_index::index<by_hash>::type::iterator it = _cache_index.get<by_hash>().find(boost::make_tuple(code_id, vm_version));
if(it != _cache_index.get<by_hash>().end()) {
if (is_write_window)
if (m.write_window)
_cache_index.relocate(_cache_index.begin(), _cache_index.project<0>(it));
return &*it;
}
if(!is_write_window) {
if(!m.write_window) {
failure = get_cd_failure::temporary; // Compile might not be done yet
return nullptr;
}

const code_tuple ct = code_tuple{code_id, vm_version};

if(_blacklist.find(ct) != _blacklist.end()) {
failure = get_cd_failure::permanent; // Compile will not start
return nullptr;
if (!m.whitelisted) {
failure = get_cd_failure::permanent; // Compile will not start
return nullptr;
}
// whitelisted, remove from blacklist and allow to try compile again
_blacklist.erase(ct);
}
if(auto it = _outstanding_compiles_and_poison.find(ct); it != _outstanding_compiles_and_poison.end()) {
failure = get_cd_failure::temporary; // Compile might not be done yet
it->second = false;
return nullptr;
}
if(auto it = _queued_compiles.get<by_hash>().find(boost::make_tuple(std::ref(code_id), vm_version)); it != _queued_compiles.get<by_hash>().end()) {
if(auto it = _queued_compiles.get<by_hash>().find(ct); it != _queued_compiles.get<by_hash>().end()) {
failure = get_cd_failure::temporary; // Compile might not be done yet
return nullptr;
}

auto msg = compile_wasm_message{ ct, !m.whitelisted ? _eosvmoc_config.non_whitelisted_limits : std::optional<subjective_compile_limits>{} };
if(_outstanding_compiles_and_poison.size() >= _threads) {
if (high_priority)
_queued_compiles.push_front(ct);
if (m.high_priority)
_queued_compiles.push_front(msg);
else
_queued_compiles.push_back(ct);
_queued_compiles.push_back(msg);
failure = get_cd_failure::temporary; // Compile might not be done yet
return nullptr;
}
@@ -179,7 +184,7 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(bool high
_outstanding_compiles_and_poison.emplace(ct, false);
std::vector<wrapped_fd> fds_to_pass;
fds_to_pass.emplace_back(memfd_for_bytearray(codeobject->code));
write_message_with_fds(_compile_monitor_write_socket, compile_wasm_message{ ct, _eosvmoc_config }, fds_to_pass);
write_message_with_fds(_compile_monitor_write_socket, msg, fds_to_pass);
failure = get_cd_failure::temporary; // Compile might not be done yet
return nullptr;
}
@@ -193,15 +198,15 @@
elog("unexpected response from EOS VM OC compile monitor during shutdown");
}

const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window) {
const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(mode m, const digest_type& code_id, const uint8_t& vm_version) {
//check for entry in cache
code_cache_index::index<by_hash>::type::iterator it = _cache_index.get<by_hash>().find(boost::make_tuple(code_id, vm_version));
if(it != _cache_index.get<by_hash>().end()) {
if (is_write_window)
if (m.write_window)
_cache_index.relocate(_cache_index.begin(), _cache_index.project<0>(it));
return &*it;
}
if(!is_write_window)
if(!m.write_window)
return nullptr;

const code_object* const codeobject = _db.find<code_object,by_code_hash>(boost::make_tuple(code_id, 0, vm_version));
Expand All @@ -211,7 +216,8 @@ const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const
std::vector<wrapped_fd> fds_to_pass;
fds_to_pass.emplace_back(memfd_for_bytearray(codeobject->code));

write_message_with_fds(_compile_monitor_write_socket, compile_wasm_message{ {code_id, vm_version}, _eosvmoc_config }, fds_to_pass);
auto msg = compile_wasm_message{ {code_id, vm_version}, !m.whitelisted ? _eosvmoc_config.non_whitelisted_limits : std::optional<subjective_compile_limits>{} };
write_message_with_fds(_compile_monitor_write_socket, msg, fds_to_pass);
auto [success, message, fds] = read_message_with_fds(_compile_monitor_read_socket);
EOS_ASSERT(success, wasm_execution_error, "failed to read response from monitor process");
EOS_ASSERT(std::holds_alternative<wasm_compilation_result_message>(message), wasm_execution_error, "unexpected response from monitor process");
Expand Down Expand Up @@ -396,7 +402,8 @@ void code_cache_base::free_code(const digest_type& code_id, const uint8_t& vm_ve
}

//if it's in the queued list, erase it
if(auto i = _queued_compiles.get<by_hash>().find(boost::make_tuple(std::ref(code_id), vm_version)); i != _queued_compiles.get<by_hash>().end())
code_tuple ct{code_id, vm_version};
if(auto i = _queued_compiles.get<by_hash>().find(ct); i != _queued_compiles.get<by_hash>().end())
_queued_compiles.get<by_hash>().erase(i);

//however, if it's currently being compiled there is no way to cancel the compile,
Expand Down
libraries/chain/…/eos-vm-oc/compile_monitor.cpp
@@ -71,7 +71,7 @@ struct compile_monitor_session {
connection_dead_signal();
return;
}
kick_compile_off(compile.code, compile.eosvmoc_config, std::move(fds[0]));
kick_compile_off(compile.code, compile.limits, std::move(fds[0]));
},
[&](const evict_wasms_message& evict) {
for(const code_descriptor& cd : evict.codes) {
@@ -90,7 +90,7 @@ struct compile_monitor_session {
});
}

void kick_compile_off(const code_tuple& code_id, const eosvmoc::config& eosvmoc_config, wrapped_fd&& wasm_code) {
void kick_compile_off(const code_tuple& code_id, const std::optional<eosvmoc::subjective_compile_limits>& limits, wrapped_fd&& wasm_code) {
//prepare a requst to go out to the trampoline
int socks[2];
socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, socks);
@@ -100,7 +100,7 @@ struct compile_monitor_session {
fds_pass_to_trampoline.emplace_back(socks[1]);
fds_pass_to_trampoline.emplace_back(std::move(wasm_code));

eosvmoc_message trampoline_compile_request = compile_wasm_message{code_id, eosvmoc_config};
eosvmoc_message trampoline_compile_request = compile_wasm_message{code_id, limits};
if(write_message_with_fds(_trampoline_socket, trampoline_compile_request, fds_pass_to_trampoline) == false) {
wasm_compilation_result_message reply{code_id, compilation_result_unknownfailure{}, _allocator->get_free_memory()};
write_message_with_fds(_nodeos_instance_socket, reply);