// Notes on an expanded language around dice expressions.
// Primitive value types available to dice expressions.
prim type Int.
prim type Set(Type).
prim type Boolean.
// `const type`s are types whose values
// must be statically known.
prim const type Type.
prim const type Filter.
// Built-in operations: `roll(count, sides)` produces a set of die results;
// `filter` narrows a set by a Filter (a const type, so statically known).
intrinsic roll(Int, Int) -> Set(Int).
intrinsic filter(Set(Int), Filter) -> Set(Int).
// Exploding dice: roll `count` d`sides` dice, push each batch of results
// onto a stack, then re-roll one die per result equal to `target`.
// Loops until a batch yields no dice matching `target`.
fn explode(count, sides, target) {
let stack = new Stack.
loop {
// Stop once the previous batch produced no matching dice.
if (count == 0) { break }
let result = roll(count, sides).
stack:push(result).
// Next iteration rolls one die for each `target` rolled this batch.
count = result:filter(die => die == target):count().
}
// The stack of per-batch result sets is the function's value.
stack
}
// Shadowrun-style roll: `count` d6s, optionally exploding on 6s
// (the "rule of six") via `explode` above.
cmd shadowrun(count, rule_of_six) {
if (rule_of_six) {
explode(count, 6, 6)
} else {
roll(count, 6)
}
}
// The `pre_eval` stage is a trick that lets us make an argument's block
// of code a data dependency of a node of our choosing, depending on how
// we use the special `do(...)` construct.
// Essentially, we delay evaluation of our argument expressions,
// so we can choose where their evaluation takes place.
// We also use `meta` blocks to do things at
// MIR construction time, like validating expression types.
// `meta` and `do` are only available in the `pre_eval` stage,
// which operators and functions must be explicitly opted into.
// Postfix `!` (explode) operator defined in the `pre_eval` stage: it
// receives its operand as un-evaluated MIR (`code`) rather than a value,
// so the `meta` block below can rewrite the operand's inputs.
postfix(5) dice op(stage = pre_eval) "!" (code) {
let stack = new Stack.
// NOTE(review): `count` is read by the loop condition below before any
// visible assignment; the `replace_with` rewrite makes `do(code)` set it,
// but the first `count == 0` check runs before the first `do(code)`.
// Confirm the intended initialization.
let count.
meta {
// TODO: figure out what other assertions would make sense here.
// Perhaps have a contract pieces of code can opt into?
assert(ret_type_of(code) == Set(Int)).
assert(ret_type_of(code:inputs():first()) == Int).
// This is meant to rewrite a data dependency from a constant to
// a variable we mutate with each iteration of the loop.
// Perhaps store named inputs on the MIR so we can go `code:query_in("count")`
// or the like.
code:inputs():first():replace_with(x => { count = x. &count }).
}
loop {
if (count == 0) { break }
// Evaluate the rewritten operand; its dice-count input now reads from
// our mutable `count`.
let result = do(code).
stack:push(result).
// NOTE(review): `target` is not bound anywhere in this operator's scope —
// looks like a leftover from `explode` above. TODO confirm what the
// explosion threshold should be here.
count = result:filter(die => die == target):count().
}
stack
}
// Or better, we can declare what stage we receive each argument at.
// Where a function receives an argument at `post_eval`, it may choose
// to leave the stage annotation off its signature for equivalent effect.
// That is, the stage defaults to `post_eval` when omitted on a parameter.
// Staged-argument variant: per-parameter stage annotations. `code` arrives
// as MIR (`pre_eval`); `target` arrives as an evaluated value (`post_eval`).
// NOTE(review): a 3-arg `explode` is also defined earlier in this file —
// confirm whether this redefinition/overload is intended.
fn explode(code @ (stage = pre_eval), target @ (stage = post_eval)) {
meta {
// Body intentionally empty; this is a signature sketch.
}
}
// Stages reference:
// # `ast` (AST)?
// # `pre_eval` (MIR)
// # `post_eval` (Value)
// For MIR arguments, we should only provide access to the part of the
// data dependency graph corresponding to what is syntactically present
// within the function call. This is both for ease of authoring effective
// functions that use `pre_eval` parameters, and for preservation of
// encapsulation boundaries. Giving every function access to the whole history
// that produced their arguments would become unwieldy rather quickly.
// We can have special syntax for creating larger such segments,
// and for passing them to such functions.
// The meta-language stuff should be presented secondary to the value
// language, since this is meant as an end user facing scripting language
// for extending dice expressions.
meta {
// This syntax for inhibiting evaluation outside of a function call expression
// shall only be usable inside of `meta` blocks.
// `#{ ... }` quotes the enclosed dice expression as an unevaluated value.
let expression = #{
4d6k3 // Yes, we have dice expression literals.
}.
}
// Variable bindings created within `meta` blocks shall only be usable
// from within meta blocks. I may relax this restriction at some point,
// but I want to see how things play out first.
// `meta { expression }` re-enters the meta context to reference the quoted
// expression, passed here as the `pre_eval` argument of `explode`.
explode(meta { expression }, 6)
// External data dependencies must be reflected in the MIR,
// though we'll erase them with further lowering.
// Due to this, embedding SQL like so:
// Deliberately problematic sketch: embedding external queries inside a
// `meta` block. The comment following this function explains why this
// does not work. Leave as-is; it is illustrative, not working code.
fn freak() {
let x;
meta {
// NOTE(review): illustrative scratch — the SQL clause order and the
// unterminated `query_prolog` block below are intentionally rough.
x = DB:query("SELECT freak WHERE player = @invoker
AND channel = @channel
FROM @charData;").
x = DB:query_prolog {
freak(Out
}
}
x
}
// ...is a bad idea, since `meta` blocks are meant to be erased
// during lowering to MIR.
// Instead, whatever builtin function is used will not be a `meta` one.
// The workable shape: context access goes through a builtin that is part
// of the MIR proper, rather than through an (erased) `meta` block.
fn freak() {
builtin:cmd_context("freak")
}
// The module system will need to accommodate having different games
// with different command and function definitions, with various other contextual
// data attached. This way, custom commands will not need to be very robust
// to varying game systems, since they can just be very specific to where they're used.
// At the same time, I'd like to enable code sharing where possible.
// Code sharing, though, is only feasible from a user's point of view if they can
// easily go back and edit previously submitted code.
// A Masks-specific command, for rolling `2d6 + <character's Freak modifier>`.
// Masks roll: 2d6 plus the looked-up character's Freak modifier.
cmd freak(name) {
let player = builtin:context():player.
// Fall back to the invoking player's default character when no name given.
name = name:unwrap_or(player:default_character:name).
2d6 + player:lookup_character(name):freak
}
// Record character stats; body not yet written. Intended invocation shapes
// are sketched in the comments below.
cmd record(char) {
// TODO: unimplemented.
}
// !record ff freak = 1
// !record ff [freak = 1, danger = -1, savior = 1, superior = 3, mundane = -1]
// A policy on type inference.
// This language has different goals from Rust.
// It is meant to be easy to use for small things,
// by people who have no prior programming background,
// nor any specific desire to learn.
// Therefore, type inference is essential, but global type inference
// is untenable both for its terrible error messages and
// the segmented entry method I'm likely going to support.
// Instead, we'll do function local type inference, including
// parameter and return types.
// We may also provide default types for literal expressions
// that can resolve to more than one type, in case the specific type
// is unclear from usage. (This is uncertain, however.)
// We'll also want to provide a way to print annotated versions
// of successfully type inferred programs, for debugging purposes.
// Builds this game's dice sub-language: the default language extended with
// keep/drop filter operators and a postfix explode.
fn my_lang() {
builtin:dice:lang:default():combine(operators {
// Infix keep-highest (`k`/`kh`) and keep-lowest (`kl`), binding powers (7, 8).
_k_ = _kh_ = builtin:filter:KeepHigh @ (infix = (7, 8))
_kl_ = builtin:filter:KeepLow @ (infix = (7, 8))
// NOTE(review): `_d_` maps to DropHigh here, but `d` is also the roll
// operator in literals like `4d6` — confirm how that ambiguity resolves.
_d_ = _dh_ = builtin:filter:DropHigh @ (infix = (7, 8))
_dl_ = builtin:filter:DropLow @ (infix = (7, 8))
// NOTE(review): postfix power 7 here vs. `postfix(5)` on the earlier
// `"!"` operator definition — confirm which precedence is intended.
_! = explode @ (postfix = 7)
})
}
// Entry point: evaluate `exp` under the custom language built by `my_lang`.
cmd roll(exp) {
roll_with(exp, my_lang())
}