#include <scx/common.bpf.h>
#include <lib/sdt_task.h>
#include <lib/dhq.h>
__weak
/*
 * Allocate and initialize a dual-strand queue.
 *
 * @fifo:          true for FIFO ordering (sequence keys), false for vtime.
 * @capacity:      total number of elements across both strands.
 * @mode:          pop policy (SCX_DHQ_MODE_*).
 * @max_imbalance: if > 0, maximum allowed size gap between strands on insert.
 *
 * Returns the new queue as a u64 pointer, or (u64)NULL on allocation failure.
 *
 * NOTE(review): allocations come from the static arena allocator, which has
 * no free path, so a partially-constructed dhq is abandoned (not freed) on
 * failure — confirm this is acceptable for the static allocator.
 */
u64 scx_dhq_create_internal(bool fifo, size_t capacity, u64 mode, u64 max_imbalance)
{
	scx_dhq_t *dhq;
	u64 heap_capacity;

	dhq = scx_static_alloc(sizeof(*dhq), 1);
	if (!dhq)
		return (u64)NULL;

	/*
	 * Split capacity across the two strands, rounding up. With a plain
	 * capacity / 2, an odd capacity would leave the two heaps holding at
	 * most capacity - 1 elements in total while the insert path checks
	 * occupancy against dhq->capacity, silently losing one slot.
	 */
	heap_capacity = (capacity + 1) / 2;

	dhq->strand_a = scx_minheap_alloc(heap_capacity);
	if (!dhq->strand_a)
		return (u64)NULL;

	dhq->strand_b = scx_minheap_alloc(heap_capacity);
	if (!dhq->strand_b)
		return (u64)NULL;

	dhq->fifo = fifo;
	dhq->capacity = capacity;
	dhq->mode = mode;
	dhq->max_imbalance = max_imbalance;
	/* Start on strand B so the first ALTERNATING pop tries strand A. */
	dhq->last_strand = SCX_DHQ_STRAND_B;

	return (u64)dhq;
}
static inline
/*
 * Insert @taskc_ptr with ordering @key into the given strand's min-heap,
 * taking dhq->lock for the duration.
 *
 * Returns 0 on success, a lock error from arena_spin_lock, -ENOSPC when the
 * queue is at capacity, -EAGAIN when the imbalance bound would be violated
 * (the caller may retry on the other strand), or the scx_minheap_insert
 * error.
 */
int __scx_dhq_insert_strand(scx_dhq_t *dhq, u64 taskc_ptr, u64 strand, u64 key)
{
	scx_minheap_t *heap;
	u64 my_size, other_size;
	int ret;

	/* Strand pointers are immutable after creation; safe to read unlocked. */
	heap = (strand == SCX_DHQ_STRAND_A) ? dhq->strand_a : dhq->strand_b;

	ret = arena_spin_lock(&dhq->lock);
	if (ret)
		return ret;

	/*
	 * Reject when full. Use >= rather than == so that any accounting
	 * drift (sizes somehow exceeding capacity) still fails closed
	 * instead of permitting unbounded growth.
	 */
	if (unlikely(dhq->size_a + dhq->size_b >= dhq->capacity)) {
		ret = -ENOSPC;
		goto error;
	}

	/*
	 * Optional balance bound: refuse to grow a strand that is already
	 * max_imbalance (or more) ahead of its sibling.
	 */
	if (dhq->max_imbalance > 0) {
		if (strand == SCX_DHQ_STRAND_A) {
			my_size = dhq->size_a;
			other_size = dhq->size_b;
		} else {
			my_size = dhq->size_b;
			other_size = dhq->size_a;
		}

		if (my_size >= other_size + dhq->max_imbalance) {
			ret = -EAGAIN;
			goto error;
		}
	}

	ret = scx_minheap_insert(heap, taskc_ptr, key);
	if (ret)
		goto error;

	/* Only account the element once the heap has accepted it. */
	if (strand == SCX_DHQ_STRAND_A)
		dhq->size_a += 1;
	else
		dhq->size_b += 1;

	arena_spin_unlock(&dhq->lock);
	return 0;

error:
	arena_spin_unlock(&dhq->lock);
	return ret;
}
__hidden
/*
 * FIFO insert: enqueue @taskc_ptr with a monotonically increasing per-strand
 * sequence number as the ordering key.
 *
 * @strand: SCX_DHQ_STRAND_A, SCX_DHQ_STRAND_B, or SCX_DHQ_STRAND_AUTO to
 *          steer toward the currently-shorter strand.
 *
 * Returns 0 on success, -EINVAL on a non-FIFO queue, or an error from
 * __scx_dhq_insert_strand.
 */
int scx_dhq_insert(scx_dhq_t *dhq, u64 taskc_ptr, u64 strand)
{
	u64 key, selected_strand;

	/* Sequence keys are only meaningful on FIFO-mode queues. */
	if (!dhq->fifo)
		return -EINVAL;

	if (strand == SCX_DHQ_STRAND_AUTO) {
		/*
		 * Best-effort balance: sizes are read without dhq->lock, so
		 * the choice may be slightly stale; the hard imbalance bound
		 * is enforced under the lock in the insert path.
		 */
		selected_strand = (dhq->size_a <= dhq->size_b) ?
			SCX_DHQ_STRAND_A : SCX_DHQ_STRAND_B;
	} else {
		selected_strand = strand;
	}

	/*
	 * The sequence counters are advanced outside of dhq->lock, so use an
	 * atomic fetch-add: a plain seq++ is a non-atomic read-modify-write
	 * and two concurrent inserters could draw the same FIFO key,
	 * breaking FIFO ordering.
	 */
	if (selected_strand == SCX_DHQ_STRAND_A)
		key = __sync_fetch_and_add(&dhq->seq_a, 1);
	else
		key = __sync_fetch_and_add(&dhq->seq_b, 1);

	return __scx_dhq_insert_strand(dhq, taskc_ptr, selected_strand, key);
}
__hidden
/*
 * Priority insert: enqueue @taskc_ptr ordered by @vtime.
 *
 * @strand: SCX_DHQ_STRAND_A, SCX_DHQ_STRAND_B, or SCX_DHQ_STRAND_AUTO to
 *          steer toward the currently-shorter strand (best effort; sizes are
 *          read without the lock).
 *
 * Returns 0 on success, -EINVAL on a FIFO queue, or an error from
 * __scx_dhq_insert_strand.
 */
int scx_dhq_insert_vtime(scx_dhq_t *dhq, u64 taskc_ptr, u64 vtime, u64 strand)
{
	u64 selected_strand = strand;

	/* vtime keys only make sense on a priority (non-FIFO) queue. */
	if (dhq->fifo)
		return -EINVAL;

	if (strand == SCX_DHQ_STRAND_AUTO)
		selected_strand = (dhq->size_b < dhq->size_a) ?
			SCX_DHQ_STRAND_B : SCX_DHQ_STRAND_A;

	return __scx_dhq_insert_strand(dhq, taskc_ptr, selected_strand, vtime);
}
static inline
/*
 * Pop the minimum element from one strand. Caller must hold dhq->lock.
 * Returns the stored element, or (u64)NULL if the strand is empty or the
 * heap pop fails.
 */
u64 __scx_dhq_pop_strand_nolock(scx_dhq_t *dhq, u64 strand)
{
	struct scx_minheap_elem helem;
	bool is_a = (strand == SCX_DHQ_STRAND_A);
	scx_minheap_t *heap = is_a ? dhq->strand_a : dhq->strand_b;

	/* Nothing queued on this strand. */
	if ((is_a ? dhq->size_a : dhq->size_b) == 0)
		return (u64)NULL;

	if (scx_minheap_pop(heap, &helem))
		return (u64)NULL;

	/* Account the successful removal on the owning strand. */
	if (is_a) {
		dhq->size_a -= 1;
		dhq->dequeue_count_a += 1;
	} else {
		dhq->size_b -= 1;
		dhq->dequeue_count_b += 1;
	}

	return helem.elem;
}
__hidden
/*
 * Locked pop from a specific strand. Returns the stored element, or
 * (u64)NULL if the strand is empty or the lock cannot be taken.
 */
u64 scx_dhq_pop_strand(scx_dhq_t *dhq, u64 strand)
{
	u64 elem;

	if (arena_spin_lock(&dhq->lock))
		return (u64)NULL;

	elem = __scx_dhq_pop_strand_nolock(dhq, strand);
	arena_spin_unlock(&dhq->lock);

	return elem;
}
__hidden
/*
 * Pop the next element according to dhq->mode, updating dhq->last_strand on
 * a successful pop. Returns the stored element, or (u64)NULL when the queue
 * is empty, the lock cannot be taken, or the mode is unrecognized.
 *
 * NOTE(review): lock failure and "queue empty" are indistinguishable to the
 * caller here — confirm that is acceptable.
 */
u64 scx_dhq_pop(scx_dhq_t *dhq)
{
u64 taskc_ptr;
u64 vtime_a, vtime_b;
u64 strand;
int ret;
ret = arena_spin_lock(&dhq->lock);
if (ret)
return (u64)NULL;
if (scx_dhq_nr_queued(dhq) == 0) {
arena_spin_unlock(&dhq->lock);
return (u64)NULL;
}
switch (dhq->mode) {
case SCX_DHQ_MODE_ALTERNATING:
/* Prefer the strand opposite the one we popped from last time. */
strand = (dhq->last_strand == SCX_DHQ_STRAND_A) ?
SCX_DHQ_STRAND_B : SCX_DHQ_STRAND_A;
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, strand);
if (!taskc_ptr) {
/* Preferred strand was empty; fall back to the other one. */
strand = (strand == SCX_DHQ_STRAND_A) ?
SCX_DHQ_STRAND_B : SCX_DHQ_STRAND_A;
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, strand);
}
if (taskc_ptr)
dhq->last_strand = strand;
break;
case SCX_DHQ_MODE_PRIORITY:
/*
 * Compare the root keys (heap minimums) of both strands and pop
 * from the one with the smaller key; an empty strand is treated
 * as holding an infinite key. Peek-then-pop is race-free because
 * dhq->lock is held throughout.
 */
if (dhq->strand_a->size > 0) {
vtime_a = dhq->strand_a->helems[0].weight;
} else {
vtime_a = (u64)-1;
}
if (dhq->strand_b->size > 0) {
vtime_b = dhq->strand_b->helems[0].weight;
} else {
vtime_b = (u64)-1;
}
if (vtime_a <= vtime_b) {
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, SCX_DHQ_STRAND_A);
if (!taskc_ptr) {
/* Strand A pop failed; fall back to strand B. */
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, SCX_DHQ_STRAND_B);
if (taskc_ptr)
dhq->last_strand = SCX_DHQ_STRAND_B;
} else {
dhq->last_strand = SCX_DHQ_STRAND_A;
}
} else {
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, SCX_DHQ_STRAND_B);
if (!taskc_ptr) {
/* Strand B pop failed; fall back to strand A. */
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, SCX_DHQ_STRAND_A);
if (taskc_ptr)
dhq->last_strand = SCX_DHQ_STRAND_A;
} else {
dhq->last_strand = SCX_DHQ_STRAND_B;
}
}
break;
case SCX_DHQ_MODE_BALANCED:
/* Drain the fuller (ties go to A) strand first to keep sizes even. */
if (dhq->size_a >= dhq->size_b) {
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, SCX_DHQ_STRAND_A);
if (!taskc_ptr)
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, SCX_DHQ_STRAND_B);
else
dhq->last_strand = SCX_DHQ_STRAND_A;
} else {
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, SCX_DHQ_STRAND_B);
if (!taskc_ptr)
taskc_ptr = __scx_dhq_pop_strand_nolock(dhq, SCX_DHQ_STRAND_A);
else
dhq->last_strand = SCX_DHQ_STRAND_B;
}
break;
default:
/* Unknown mode: hand back nothing. */
taskc_ptr = (u64)NULL;
break;
}
arena_spin_unlock(&dhq->lock);
return taskc_ptr;
}
static inline
/*
 * Read (without removing) the minimum element of one strand. Caller must
 * hold dhq->lock. Returns (u64)NULL if the strand's heap is empty.
 */
u64 __scx_dhq_peek_strand_nolock(scx_dhq_t *dhq, u64 strand)
{
	scx_minheap_t *heap = (strand == SCX_DHQ_STRAND_A) ?
		dhq->strand_a : dhq->strand_b;

	/* The heap root is the minimum; an empty heap has none. */
	return heap->size == 0 ? (u64)NULL : heap->helems[0].elem;
}
__hidden
/*
 * Locked peek at a specific strand. Returns the stored element at the heap
 * root, or (u64)NULL if the strand is empty or the lock cannot be taken.
 */
u64 scx_dhq_peek_strand(scx_dhq_t *dhq, u64 strand)
{
	u64 elem;

	if (arena_spin_lock(&dhq->lock))
		return (u64)NULL;

	elem = __scx_dhq_peek_strand_nolock(dhq, strand);
	arena_spin_unlock(&dhq->lock);

	return elem;
}
__hidden
/*
 * Return the element scx_dhq_pop would hand out next, without removing it
 * and without touching dhq->last_strand. Returns (u64)NULL when the queue
 * is empty, the lock cannot be taken, or the mode is unrecognized.
 */
u64 scx_dhq_peek(scx_dhq_t *dhq)
{
u64 taskc_ptr;
u64 vtime_a, vtime_b;
u64 strand;
int ret;
ret = arena_spin_lock(&dhq->lock);
if (ret)
return (u64)NULL;
if (scx_dhq_nr_queued(dhq) == 0) {
arena_spin_unlock(&dhq->lock);
return (u64)NULL;
}
switch (dhq->mode) {
case SCX_DHQ_MODE_ALTERNATING:
/* Look at the strand opposite the last pop, as pop would. */
strand = (dhq->last_strand == SCX_DHQ_STRAND_A) ?
SCX_DHQ_STRAND_B : SCX_DHQ_STRAND_A;
taskc_ptr = __scx_dhq_peek_strand_nolock(dhq, strand);
if (!taskc_ptr) {
/* Preferred strand empty; peek at the other one. */
strand = (strand == SCX_DHQ_STRAND_A) ?
SCX_DHQ_STRAND_B : SCX_DHQ_STRAND_A;
taskc_ptr = __scx_dhq_peek_strand_nolock(dhq, strand);
}
break;
case SCX_DHQ_MODE_PRIORITY:
/*
 * Pick the strand whose heap root has the smaller key; an empty
 * strand counts as holding an infinite key.
 */
if (dhq->strand_a->size > 0) {
vtime_a = dhq->strand_a->helems[0].weight;
} else {
vtime_a = (u64)-1;
}
if (dhq->strand_b->size > 0) {
vtime_b = dhq->strand_b->helems[0].weight;
} else {
vtime_b = (u64)-1;
}
if (vtime_a <= vtime_b)
taskc_ptr = __scx_dhq_peek_strand_nolock(dhq, SCX_DHQ_STRAND_A);
else
taskc_ptr = __scx_dhq_peek_strand_nolock(dhq, SCX_DHQ_STRAND_B);
break;
case SCX_DHQ_MODE_BALANCED:
/* Favor the fuller (ties go to A) strand, as pop would. */
if (dhq->size_a >= dhq->size_b) {
taskc_ptr = __scx_dhq_peek_strand_nolock(dhq, SCX_DHQ_STRAND_A);
if (!taskc_ptr)
taskc_ptr = __scx_dhq_peek_strand_nolock(dhq, SCX_DHQ_STRAND_B);
} else {
taskc_ptr = __scx_dhq_peek_strand_nolock(dhq, SCX_DHQ_STRAND_B);
if (!taskc_ptr)
taskc_ptr = __scx_dhq_peek_strand_nolock(dhq, SCX_DHQ_STRAND_A);
}
break;
default:
/* Unknown mode: nothing to report. */
taskc_ptr = (u64)NULL;
break;
}
arena_spin_unlock(&dhq->lock);
return taskc_ptr;
}
__hidden
/*
 * Total occupancy across both strands. Reads the counters without taking
 * dhq->lock, so the value may be stale under concurrency.
 */
int scx_dhq_nr_queued(scx_dhq_t *dhq)
{
	int total = dhq->size_a;

	total += dhq->size_b;
	return total;
}
__hidden
/*
 * Occupancy of a single strand. Reads the counter without taking dhq->lock,
 * so the value may be stale under concurrency.
 */
int scx_dhq_nr_queued_strand(scx_dhq_t *dhq, u64 strand)
{
	if (strand == SCX_DHQ_STRAND_A)
		return dhq->size_a;

	return dhq->size_b;
}