#ifndef LLVM_LIBC_SHARED_RPC_H
#define LLVM_LIBC_SHARED_RPC_H
#include "rpc_util.h"
namespace rpc {
// Compilers without the scoped atomic builtins fall back to the plain
// __atomic_* forms; the scope argument is simply dropped in that case.
#if !__has_builtin(__scoped_atomic_load_n)
#define __scoped_atomic_load_n(src, ord, scp) __atomic_load_n(src, ord)
#define __scoped_atomic_store_n(dst, src, ord, scp)                            \
  __atomic_store_n(dst, src, ord)
#define __scoped_atomic_fetch_or(src, val, ord, scp)                           \
  __atomic_fetch_or(src, val, ord)
#define __scoped_atomic_fetch_and(src, val, ord, scp)                          \
  __atomic_fetch_and(src, val, ord)
#endif
#if !__has_builtin(__scoped_atomic_thread_fence)
#define __scoped_atomic_thread_fence(ord, scp) __atomic_thread_fence(ord)
#endif
/// Generic status codes for RPC operations shared by client and server.
enum Status {
  RPC_SUCCESS = 0x0,
  RPC_ERROR = 0x1000,
  RPC_UNHANDLED_OPCODE = 0x1001,
};
/// A fixed-size slot of shared memory used to pass data between the client
/// and the server. One Buffer is allocated per port per lane.
struct Buffer {
  uint64_t data[8];
};
// The offset math below assumes each slot is exactly 64 bytes.
static_assert(sizeof(Buffer) == 64, "Buffer size mismatch");
/// Per-port metadata written by the client when a port is opened: the lane
/// mask of the threads participating in the transaction and the opcode the
/// server should dispatch on.
struct Header {
  uint64_t mask;
  uint32_t opcode;
};

// Upper bound on the number of ports; sizes the lock bitfield in Process.
constexpr static uint64_t MAX_PORT_COUNT = 4096;
/// The shared state used to synchronize communication between a client and a
/// server. Each side owns a read-only inbox and a write-only outbox; a port's
/// buffer is owned by whichever side's outbox matches (or, with 'Invert',
/// differs from) the peer's inbox. The 'Invert' parameter swaps the mailbox
/// placement in the shared allocation so both sides agree on the layout.
template <bool Invert> struct Process {
  RPC_ATTRS Process() = default;
  RPC_ATTRS Process(const Process &) = delete;
  RPC_ATTRS Process &operator=(const Process &) = delete;
  RPC_ATTRS Process(Process &&) = default;
  RPC_ATTRS Process &operator=(Process &&) = default;
  RPC_ATTRS ~Process() = default;

  const uint32_t port_count = 0;
  const uint32_t *const inbox = nullptr;
  uint32_t *const outbox = nullptr;
  Header *const header = nullptr;
  Buffer *const packet = nullptr;

  // One bit of 'lock' per port; the bitfield is sized for MAX_PORT_COUNT.
  static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
  uint32_t lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};

  /// Carves the single shared allocation 'buffer' into the two mailboxes,
  /// the header array, and the packet buffers using the offset helpers below.
  RPC_ATTRS Process(uint32_t port_count, void *buffer)
      : port_count(port_count), inbox(reinterpret_cast<uint32_t *>(
                                    advance(buffer, inbox_offset(port_count)))),
        outbox(reinterpret_cast<uint32_t *>(
            advance(buffer, outbox_offset(port_count)))),
        header(reinterpret_cast<Header *>(
            advance(buffer, header_offset(port_count)))),
        packet(reinterpret_cast<Buffer *>(
            advance(buffer, buffer_offset(port_count)))) {}

  /// Total bytes of shared memory required for 'port_count' ports at the
  /// given lane size.
  RPC_ATTRS static constexpr uint64_t allocation_size(uint32_t port_count,
                                                      uint32_t lane_size) {
    return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);
  }

  /// Relaxed load of the inbox flag for 'index', broadcast so every lane in
  /// 'lane_mask' observes the same value.
  RPC_ATTRS uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
    return rpc::broadcast_value(
        lane_mask, __scoped_atomic_load_n(&inbox[index], __ATOMIC_RELAXED,
                                          __MEMORY_SCOPE_SYSTEM));
  }

  /// Relaxed load of the outbox flag for 'index', broadcast across the lanes.
  RPC_ATTRS uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
    return rpc::broadcast_value(
        lane_mask, __scoped_atomic_load_n(&outbox[index], __ATOMIC_RELAXED,
                                          __MEMORY_SCOPE_SYSTEM));
  }

  /// Flips the outbox bit (0 <-> 1) to pass buffer ownership to the other
  /// side. The release fence orders all prior buffer writes before the store;
  /// only the first lane performs the store.
  RPC_ATTRS uint32_t invert_outbox(uint64_t lane_mask, uint32_t index,
                                   uint32_t current_outbox) {
    uint32_t inverted_outbox = !current_outbox;
    rpc::sync_lane(lane_mask);
    __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM);
    if (rpc::is_first_lane(lane_mask))
      __scoped_atomic_store_n(&outbox[index], inverted_outbox, __ATOMIC_RELAXED,
                              __MEMORY_SCOPE_SYSTEM);
    return inverted_outbox;
  }

  /// Spins until this side owns the buffer at 'index', then issues an acquire
  /// fence so subsequent reads see the peer's buffer writes.
  RPC_ATTRS void wait_for_ownership(uint64_t lane_mask, uint32_t index,
                                    uint32_t outbox, uint32_t in) {
    while (buffer_unavailable(in, outbox)) {
      sleep_briefly();
      in = load_inbox(lane_mask, index);
    }
    __scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_SYSTEM);
  }

  /// Returns the first packet slot for port 'index'; each port owns
  /// 'lane_size' consecutive Buffer slots, one per lane.
  RPC_ATTRS Buffer *get_packet(uint32_t index, uint32_t lane_size) {
    return &packet[index * lane_size];
  }

  /// Ownership test: one side owns the buffer when in == out, the other when
  /// they differ; 'Invert' selects which convention this process uses.
  RPC_ATTRS static bool buffer_unavailable(uint32_t in, uint32_t out) {
    bool cond = in != out;
    return Invert ? !cond : cond;
  }

  /// Attempt to claim the lock for port 'index' on behalf of every lane in
  /// 'lane_mask'. Returns true if this group took the lock. Every active lane
  /// does the fetch_or; lanes outside the mask contribute a zero bit so they
  /// cannot accidentally take the lock. The group holds the lock iff at least
  /// one in-mask lane observed the bit previously clear (lane_mask != packed).
  RPC_ATTRS bool try_lock(uint64_t lane_mask, uint32_t index) {
    uint32_t id = rpc::get_lane_id();
    // NOTE(review): '1ul' is 32 bits on LLP64 hosts while 'id' may reach 63 —
    // assumed fine on the LP64/GPU targets this builds for; confirm if the
    // host side is ever built on such a platform.
    bool id_in_lane_mask = lane_mask & (1ul << id);
    bool before = set_nth(lock, index, id_in_lane_mask);
    uint64_t packed = rpc::ballot(lane_mask, before);
    bool holding_lock = lane_mask != packed;
    // Pair with the release fence in unlock() so the critical section's data
    // is visible to the new lock holder.
    if (holding_lock)
      __scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);
    return holding_lock;
  }

  /// Release the lock for port 'index'. The release fence publishes the
  /// critical section before the first lane clears the bit; the lane sync
  /// keeps trailing lanes from racing past the unlock.
  RPC_ATTRS void unlock(uint64_t lane_mask, uint32_t index) {
    __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_DEVICE);
    clear_nth(lock, index, rpc::is_first_lane(lane_mask));
    rpc::sync_lane(lane_mask);
  }

  /// Bytes of one mailbox (one uint32_t flag per port).
  RPC_ATTRS static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
    return port_count * sizeof(uint32_t);
  }

  /// Bytes of packet storage: one Buffer per lane per port.
  RPC_ATTRS static constexpr uint64_t buffer_bytes(uint32_t port_count,
                                                   uint32_t lane_size) {
    return port_count * lane_size * sizeof(Buffer);
  }

  // The two mailboxes occupy the first 2 * mailbox_bytes of the allocation;
  // 'Invert' swaps which one each side treats as its inbox.
  RPC_ATTRS static constexpr uint64_t inbox_offset(uint32_t port_count) {
    return Invert ? mailbox_bytes(port_count) : 0;
  }
  RPC_ATTRS static constexpr uint64_t outbox_offset(uint32_t port_count) {
    return Invert ? 0 : mailbox_bytes(port_count);
  }

  /// Header array starts after both mailboxes, aligned for Header.
  RPC_ATTRS static constexpr uint64_t header_offset(uint32_t port_count) {
    return align_up(2 * mailbox_bytes(port_count), alignof(Header));
  }

  /// Packet buffers start after the header array, aligned for Buffer.
  RPC_ATTRS static constexpr uint64_t buffer_offset(uint32_t port_count) {
    return align_up(header_offset(port_count) + port_count * sizeof(Header),
                    alignof(Buffer));
  }

  /// Atomically sets bit 'index' of the bitfield when 'cond' is true; returns
  /// nonzero if the bit was already set.
  RPC_ATTRS static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,
                                              bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return __scoped_atomic_fetch_or(&bits[slot],
                                    static_cast<uint32_t>(cond) << bit,
                                    __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &
           (1u << bit);
  }

  /// Atomically clears bit 'index' of the bitfield when 'cond' is true;
  /// returns nonzero if the bit was previously set.
  RPC_ATTRS static constexpr uint32_t clear_nth(uint32_t *bits, uint32_t index,
                                                bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return __scoped_atomic_fetch_and(&bits[slot],
                                     ~0u ^ (static_cast<uint32_t>(cond) << bit),
                                     __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &
           (1u << bit);
  }
};
/// Runs the callback on the packet slots for this transaction. On the GPU
/// every active lane services its own slot; otherwise a host thread walks the
/// slots and services each lane named in 'lane_mask'.
template <typename F>
RPC_ATTRS static void invoke_rpc(F &&fn, uint32_t lane_size, uint64_t lane_mask,
                                 Buffer *slot) {
  if constexpr (is_process_gpu()) {
    // One hardware lane, one slot.
    fn(&slot[rpc::get_lane_id()], rpc::get_lane_id());
  } else {
    // Step by the local lane count and skip lanes absent from the mask.
    for (uint32_t lane = 0; lane < lane_size; lane += rpc::get_num_lanes())
      if (lane_mask & (1ul << lane))
        fn(&slot[lane], lane);
  }
}
/// The interface for communicating over an opened channel. A port is an index
/// into the process's shared state, held under that index's lock bit. 'T' is
/// true for the server side (matching Process<true>).
template <bool T> struct Port {
  RPC_ATTRS Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
                 uint32_t index, uint32_t out)
      : process(process), lane_mask(lane_mask), lane_size(lane_size),
        index(index), out(out), receive(false), owns_buffer(true) {}
  // Closing on destruction releases the lock (and, on the server, returns
  // buffer ownership) — RAII for the port.
  RPC_ATTRS ~Port() { close(); }

private:
  RPC_ATTRS Port(const Port &) = delete;
  RPC_ATTRS Port &operator=(const Port &) = delete;
  RPC_ATTRS Port(Port &&) = delete;
  RPC_ATTRS Port &operator=(Port &&) = delete;

  friend struct Client;
  friend struct Server;
  friend class rpc::optional<Port<T>>;

public:
  template <typename U> RPC_ATTRS void recv(U use);
  template <typename F> RPC_ATTRS void send(F fill);
  template <typename F, typename U> RPC_ATTRS void send_and_recv(F fill, U use);
  template <typename W> RPC_ATTRS void recv_and_send(W work);
  RPC_ATTRS void send_n(const void *const *src, uint64_t *size);
  RPC_ATTRS void send_n(const void *src, uint64_t size);
  template <typename A>
  RPC_ATTRS void recv_n(void **dst, uint64_t *size, A &&alloc);
  template <typename Ty> RPC_ATTRS void send_n(const Ty *src);
  template <typename Ty> RPC_ATTRS void recv_n(Ty *dst);

  /// The opcode the client stored in this port's header when opening it.
  RPC_ATTRS uint32_t get_opcode() const { return process.header[index].opcode; }

  RPC_ATTRS uint32_t get_index() const { return index; }

  /// On the server the authoritative mask is the one the client recorded in
  /// the header; on the client it is the locally captured mask.
  RPC_ATTRS uint64_t get_lane_mask() const {
    if constexpr (T)
      return process.header[index].mask;
    return lane_mask;
  }

private:
  /// Releases the port: the server flips the outbox to hand the buffer back
  /// if it still owns it, then the lock is dropped.
  RPC_ATTRS void close() {
    rpc::sync_lane(lane_mask);
    if (owns_buffer && T)
      out = process.invert_outbox(lane_mask, index, out);
    process.unlock(lane_mask, index);
  }

  Process<T> &process;
  uint64_t lane_mask;
  uint32_t lane_size;
  uint32_t index;
  uint32_t out;           // Last known value of our outbox flag.
  bool receive;           // A recv() completed without an acknowledging send.
  bool owns_buffer;       // We currently own the shared buffer at 'index'.
};
/// The RPC client used to make requests to the server. Wraps a non-inverted
/// Process and opens Port<false> instances.
struct Client {
  RPC_ATTRS Client() = default;
  RPC_ATTRS Client(const Client &) = delete;
  RPC_ATTRS Client &operator=(const Client &) = delete;
  RPC_ATTRS ~Client() = default;

  /// 'buffer' is the shared allocation laid out per Process<false>.
  RPC_ATTRS Client(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<false>;
  template <uint32_t opcode> RPC_ATTRS Port open();

private:
  Process<false> process;
};
/// The RPC server used to respond to client requests. Wraps an inverted
/// Process and opens Port<true> instances.
struct Server {
  RPC_ATTRS Server() = default;
  RPC_ATTRS Server(const Server &) = delete;
  RPC_ATTRS Server &operator=(const Server &) = delete;
  RPC_ATTRS ~Server() = default;

  /// 'buffer' is the shared allocation laid out per Process<true>.
  RPC_ATTRS Server(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<true>;
  RPC_ATTRS rpc::optional<Port> try_open(uint32_t lane_size,
                                         uint32_t start = 0);

  /// Shared-memory size the server requires for the given configuration.
  RPC_ATTRS static constexpr uint64_t allocation_size(uint32_t lane_size,
                                                      uint32_t port_count) {
    return Process<true>::allocation_size(port_count, lane_size);
  }

private:
  Process<true> process;
};
/// Applies \p fill to the shared buffer and initiates a send operation by
/// flipping the outbox, handing ownership to the other side.
template <bool T> template <typename F> RPC_ATTRS void Port<T>::send(F fill) {
  // If we already own the buffer we can derive the inbox value (out ^ T)
  // instead of loading it.
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // Block until this side owns the buffer.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Populate the packet, then release it to the peer.
  invoke_rpc(fill, lane_size, get_lane_mask(),
             process.get_packet(index, lane_size));
  out = process.invert_outbox(lane_mask, index, out);
  owns_buffer = false;
  receive = false;
}
/// Applies \p use to the shared buffer after the peer has filled it. Ownership
/// is retained after the read so back-to-back receives work without an
/// intervening send.
template <bool T> template <typename U> RPC_ATTRS void Port<T>::recv(U use) {
  // A previous recv left us holding the buffer; acknowledge it first so the
  // peer can produce the next packet.
  if (receive) {
    out = process.invert_outbox(lane_mask, index, out);
    owns_buffer = false;
  }

  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // Block until the peer has handed the buffer to us, then consume it.
  process.wait_for_ownership(lane_mask, index, out, in);
  invoke_rpc(use, lane_size, get_lane_mask(),
             process.get_packet(index, lane_size));
  receive = true;
  owns_buffer = true;
}
/// Combined send then receive: \p fill produces the request and \p use
/// consumes the reply.
template <bool T>
template <typename F, typename U>
RPC_ATTRS void Port<T>::send_and_recv(F fill, U use) {
  send(fill);
  recv(use);
}
/// Receives a packet, lets \p work transform it in place, then sends an empty
/// acknowledgement so the buffer (already modified by \p work) returns to the
/// peer.
template <bool T>
template <typename W>
RPC_ATTRS void Port<T>::recv_and_send(W work) {
  recv(work);
  send([](Buffer *, uint32_t) { /* no-op fill; buffer already updated */ });
}
/// Convenience overload sending one contiguous buffer: forwards to the
/// per-lane form with this lane's pointer and size.
template <bool T>
RPC_ATTRS void Port<T>::send_n(const void *src, uint64_t size) {
  send_n(&src, &size);
}
/// Streams an arbitrarily sized buffer per lane through the fixed-size packet.
/// The first packet carries the total size in its leading uint64_t followed by
/// as much payload as fits; the remainder is sent in full-packet chunks.
template <bool T>
RPC_ATTRS void Port<T>::send_n(const void *const *src, uint64_t *size) {
  uint64_t num_sends = 0;

  // First packet: size word plus the initial payload fragment.
  send([&](Buffer *buffer, uint32_t id) {
    reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
    // Track how many bytes any lane still needs; on the GPU each lane keeps
    // its own count, on the CPU we reduce with max over the lanes.
    num_sends = is_process_gpu() ? lane_value(size, id)
                                 : rpc::max(lane_value(size, id), num_sends);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(&buffer->data[1], lane_value(src, id), len);
  });

  // Remaining data, one full packet at a time, while any lane in the
  // transaction's mask still has bytes outstanding.
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  while (rpc::ballot(mask, idx < num_sends)) {
    send([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      // Lanes that already finished still participate in the send but copy
      // nothing.
      if (idx < lane_value(size, id))
        rpc_memcpy(buffer->data, advance(lane_value(src, id), idx), len);
    });
    idx += sizeof(Buffer::data);
  }
}
/// Receives a stream produced by send_n. Reads the per-lane size from the
/// first packet, allocates the destination via \p alloc, then drains the
/// remaining full-packet chunks. Mirrors send_n's framing exactly.
template <bool T>
template <typename A>
RPC_ATTRS void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
  uint64_t num_recvs = 0;

  // First packet: recover the size, allocate storage, copy the initial
  // fragment that followed the size word.
  recv([&](Buffer *buffer, uint32_t id) {
    lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
    lane_value(dst, id) =
        reinterpret_cast<uint8_t *>(alloc(lane_value(size, id)));
    // Outstanding byte count: per-lane on the GPU, max-reduced on the CPU.
    num_recvs = is_process_gpu() ? lane_value(size, id)
                                 : rpc::max(lane_value(size, id), num_recvs);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(lane_value(dst, id), &buffer->data[1], len);
  });

  // Drain full packets while any lane in the mask expects more data.
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  while (rpc::ballot(mask, idx < num_recvs)) {
    recv([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      // Finished lanes still take part in the recv but copy nothing.
      if (idx < lane_value(size, id))
        rpc_memcpy(advance(lane_value(dst, id), idx), buffer->data, len);
    });
    idx += sizeof(Buffer::data);
  }
}
/// Sends one fixed-size object per lane, streamed through the packet in
/// sizeof(Buffer::data)-byte chunks.
template <bool T>
template <typename Ty>
RPC_ATTRS void Port<T>::send_n(const Ty *src) {
  uint64_t offset = 0;
  while (offset < sizeof(Ty)) {
    const uint64_t chunk = rpc::min(sizeof(Ty) - offset, sizeof(Buffer::data));
    send([&](Buffer *buffer, uint32_t id) {
      rpc_memcpy(buffer->data, advance(&lane_value(src, id), offset), chunk);
    });
    offset += sizeof(Buffer::data);
  }
}
/// Receives one fixed-size object per lane, reassembled from
/// sizeof(Buffer::data)-byte chunks; mirrors the typed send_n framing.
template <bool T>
template <typename Ty>
RPC_ATTRS void Port<T>::recv_n(Ty *dst) {
  uint64_t offset = 0;
  while (offset < sizeof(Ty)) {
    const uint64_t chunk = rpc::min(sizeof(Ty) - offset, sizeof(Buffer::data));
    recv([&](Buffer *buffer, uint32_t id) {
      rpc_memcpy(advance(&lane_value(dst, id), offset), buffer->data, chunk);
    });
    offset += sizeof(Buffer::data);
  }
}
/// Opens a port for sending with the given compile-time \p opcode. Scans the
/// ports in a loop (wrapping around) until one can be locked in a valid
/// sending state; never returns until a port is obtained.
template <uint32_t opcode> RPC_ATTRS Client::Port Client::open() {
  for (uint32_t index = 0;; ++index) {
    // Wrap the linear scan when we run past the last port.
    if (index >= process.port_count)
      index = 0;

    // Every lane must attempt to lock the same index, so broadcast it.
    uint64_t lane_mask = rpc::get_lane_mask();
    index = rpc::broadcast_value(lane_mask, index);
    if (!process.try_lock(lane_mask, index))
      continue;

    // With the lock held, re-check that the buffer is ours to fill.
    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);
    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    // Record the opcode and participating lanes for the server, then make
    // sure every lane sees the header before the port is used.
    if (rpc::is_first_lane(lane_mask)) {
      process.header[index].opcode = opcode;
      process.header[index].mask = lane_mask;
    }
    rpc::sync_lane(lane_mask);
    return Port(process, lane_mask, rpc::get_num_lanes(), index, out);
  }
}
/// Attempts to open a port with pending work, scanning from \p start. Returns
/// rpc::nullopt when no port is ready (or when this lane is outside the
/// configured lane size). Non-blocking, unlike Client::open.
RPC_ATTRS rpc::optional<typename Server::Port>
Server::try_open(uint32_t lane_size, uint32_t start) {
  // Surplus lanes take no part in the protocol.
  if (rpc::get_lane_id() >= lane_size)
    return rpc::nullopt;
  for (uint32_t index = start; index < process.port_count; ++index) {
    uint64_t lane_mask = rpc::get_lane_mask();

    // Cheap unlocked peek: skip ports with no pending send.
    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);
    if (process.buffer_unavailable(in, out))
      continue;

    if (!process.try_lock(lane_mask, index))
      continue;

    // Re-check under the lock; the state may have changed since the peek.
    in = process.load_inbox(lane_mask, index);
    out = process.load_outbox(lane_mask, index);
    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    // Construct the Port in place — it is non-movable.
    return rpc::optional<Port>(rpc::in_place, process, lane_mask, lane_size,
                               index, out);
  }
  return rpc::nullopt;
}
// Undefine the fallback macros so they do not leak to files including this
// header.
#if !__has_builtin(__scoped_atomic_load_n)
#undef __scoped_atomic_load_n
#undef __scoped_atomic_store_n
#undef __scoped_atomic_fetch_or
#undef __scoped_atomic_fetch_and
#endif
#if !__has_builtin(__scoped_atomic_thread_fence)
#undef __scoped_atomic_thread_fence
#endif
}
#endif