llama-cpp-sys-4 0.2.46

Low Level Bindings to llama.cpp
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
#pragma clang diagnostic ignored "-Wunused-variable"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wunused-but-set-variable"

#include <HAP_farf.h>
#include <HAP_perf.h>

#include <math.h>
#include <string.h>

#include "hex-dma.h"
#include "hvx-utils.h"
#include "hex-fastdiv.h"

#define GGML_COMMON_DECL_C
#include "ggml-common.h"
#include "htp-ctx.h"
#include "htp-ops.h"
#include "htp-ops.h"

// Redefined the types GGML_ROPE_TYPE_NORMAL & GGML_ROPE_TYPE_NEOX as we can't include ggml.h
#define HTP_ROPE_TYPE_NORMAL 0
#define HTP_ROPE_TYPE_NEOX   2

#define HTP_ROPE_SPAD_NROWS  16
#define HTP_ROPE_SPAD_BLOCK  (HTP_ROPE_SPAD_NROWS/2)

// Pull the tensor geometry of `src0` and `dst` into local variables at the
// expansion site: ne0x = element counts per dimension, nb0x = byte strides.
// Expects `src0` and `dst` (struct htp_tensor pointers) to be in scope.
// Some of the locals may go unused by a given op; the -Wunused-variable
// pragma at the top of this file silences those warnings.
#define htp_rope_preamble              \
    const uint32_t ne00 = src0->ne[0]; \
    const uint32_t ne01 = src0->ne[1]; \
    const uint32_t ne02 = src0->ne[2]; \
    const uint32_t ne03 = src0->ne[3]; \
                                       \
    const uint32_t ne0 = dst->ne[0];   \
    const uint32_t ne1 = dst->ne[1];   \
    const uint32_t ne2 = dst->ne[2];   \
    const uint32_t ne3 = dst->ne[3];   \
                                       \
    const uint32_t nb00 = src0->nb[0]; \
    const uint32_t nb01 = src0->nb[1]; \
    const uint32_t nb02 = src0->nb[2]; \
    const uint32_t nb03 = src0->nb[3]; \
                                       \
    const uint32_t nb0 = dst->nb[0];   \
    const uint32_t nb1 = dst->nb[1];   \
    const uint32_t nb2 = dst->nb[2];   \
    const uint32_t nb3 = dst->nb[3];

// Per-op state for a ROPE job, shared (read-only) by all worker threads.
struct htp_rope_context {
    int32_t n_dims;       // number of leading channels that get rotated
    int32_t mode;         // rope type flags (HTP_ROPE_TYPE_*)
    int32_t n_ctx_orig;   // original training context length (YaRN correction)
    int32_t sections[4];  // section splits copied from op_params; not read in this file -- TODO confirm use

    float freq_base;      // base for the theta geometric progression
    float freq_scale;     // interpolation scale applied to theta
    float ext_factor;     // YaRN extrapolation mix factor (0 = plain RoPE)
    float attn_factor;    // magnitude scale applied to cos/sin coefficients
    float beta_fast;      // YaRN correction-range parameter (fast boundary)
    float beta_slow;      // YaRN correction-range parameter (slow boundary)
    float theta_scale;    // per-pair multiplier: powf(freq_base, -2/n_dims)
    float corr_dims[2];   // [start, end] dims computed by rope_corr_dims()

    uint32_t src0_nrows_per_thread;  // row chunk statically assigned to each worker
    size_t spad_stride;              // NOTE(review): never written or read in this file -- confirm before relying on it

    struct htp_ops_context * octx;   // parent op context (tensors, spads, DMA queues)

    size_t src0_row_size;            // bytes per src0 row (src0->nb[1])
    size_t dst_row_size;             // bytes per dst row (dst->nb[1])
    size_t src0_row_size_aligned;    // src0 row size rounded up to VLEN
    size_t dst_row_size_aligned;     // dst row size rounded up to VLEN
    size_t theta_cache_offset;       // byte offset from spad base to the row staging area (= aligned theta cache size)
    uint32_t src0_nrows;             // total rows in src0 (ne1 * ne2 * ne3)

    uint64_t t_start;                // qtimer tick at op start (profiling logs)
};

static float rope_yarn_ramp(const float low, const float high, const int i0) {
    const float y = (i0 / 2 - low) / MAX(0.001f, high - low);

    return (1 - MIN(1, MAX(0, y)));
}

// Build the per-position rotation table: for each rotation pair k,
// cache[2k] = cos(theta_k) * m and cache[2k+1] = sin(theta_k) * m.
// With ext_factor != 0 the YaRN scheme blends the interpolated and
// extrapolated angles and rescales the magnitude.
// ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py
static void rope_cache_init(const float    theta_base,
                            const float    freq_scale,
                            const float *  freq_factors,
                            float *        corr_dims,
                            const uint32_t ne0,
                            const float    ext_factor,
                            const float    mscale,
                            float *        cache,
                            const float    theta_scale) {
    float theta = theta_base;

    for (uint32_t i0 = 0; i0 < ne0; i0 += 2) {
        const float theta_extrap = freq_factors ? theta / freq_factors[i0 / 2] : theta / 1.0f;
        const float theta_interp = freq_scale * theta_extrap;

        float angle = theta_interp;
        float mag   = mscale;

        if (ext_factor != 0.0f) {
            // Inlined linear ramp over [corr_dims[0], corr_dims[1]], clamped
            // to [0, 1] and inverted (1 = interpolate, 0 = extrapolate).
            const float low  = corr_dims[0];
            const float high = corr_dims[1];
            const float span = (0.001f > high - low) ? 0.001f : (high - low);

            float y = (i0 / 2 - low) / span;
            y = (0.0f > y) ? 0.0f : y;
            y = (1.0f < y) ? 1.0f : y;

            const float ramp_mix = (1.0f - y) * ext_factor;
            angle = theta_interp * (1.0f - ramp_mix) + theta_extrap * ramp_mix;

            // Magnitude correction for interpolation.
            mag *= 1.0f + 0.1f * logf(1.0f / freq_scale);
        }

        cache[i0 + 0] = cosf(angle) * mag;
        cache[i0 + 1] = sinf(angle) * mag;

        theta *= theta_scale;
    }
}

#define M_PI 3.1415926535897932384626433

static void rope_corr_dims(int     n_dims,
                           int     n_ctx_orig,
                           float   freq_base,
                           float   beta_fast,
                           float   beta_slow,
                           float * dims) {
    float start = floorf(n_dims * logf(n_ctx_orig / (beta_fast * 2 * (float) M_PI)) / (2 * logf(freq_base)));
    float end   = ceilf(n_dims * logf(n_ctx_orig / (beta_slow * 2 * (float) M_PI)) / (2 * logf(freq_base)));
    dims[0]     = MAX(0, start);
    dims[1]     = MIN(n_dims - 1, end);
}

// NEOX-style RoPE for one fp32 row of `ne` rotated elements.
// Rotation pairs are split across the row halves: (src0[k], src0[k + ne/2]).
// theta_cache holds interleaved (cos, sin) coefficients, two floats per pair.
// The `_aa` suffix presumably means both src0 and dst are HVX-vector aligned
// (they are cast straight to HVX_Vector *) -- TODO confirm against callers.
static inline void hvx_rope_neox_f32_aa(float * restrict dst, const float * restrict src0, uint32_t ne, const float * restrict theta_cache) {
    const HVX_Vector * restrict vsrc   = (const HVX_Vector *) src0;
    const HVX_Vector * restrict vtheta = (const HVX_Vector *) theta_cache;
    HVX_Vector       * restrict vdst   = (HVX_Vector *) dst;

    uint32_t nvec = (ne / (VLEN_FP32 * 2) * 2); // 2 vecs per loop, step of 2

    uint32_t he = ne / 2;         // half_dims offset in elements
    uint32_t hv = he / VLEN_FP32; // half_dims offset in vectors

    #pragma unroll(2)
    for (uint32_t i = 0; i < nvec; i += 2) {
        // One vector from each half of the row: x0 and its partner x1.
        HVX_Vector v0 = vsrc[i/2+0];
        HVX_Vector v1 = vsrc[i/2+hv];

        // Two theta vectors cover VLEN_FP32 (cos, sin) pairs.
        HVX_Vector v2 = vtheta[i+0];
        HVX_Vector v3 = vtheta[i+1];

        HVX_VectorPair vcos_sin = Q6_W_vdeal_VVR(v3, v2, -4);  // vcos_sin[0] = cos_theta, vcos_sin[1] = sin_theta

        HVX_Vector vx0_c = Q6_Vqf32_vmpy_VsfVsf(v0, Q6_V_lo_W(vcos_sin));
        HVX_Vector vx0_s = Q6_Vqf32_vmpy_VsfVsf(v0, Q6_V_hi_W(vcos_sin));
        HVX_Vector vx1_c = Q6_Vqf32_vmpy_VsfVsf(v1, Q6_V_lo_W(vcos_sin));
        HVX_Vector vx1_s = Q6_Vqf32_vmpy_VsfVsf(v1, Q6_V_hi_W(vcos_sin));

        // Rotation: y0 = x0*cos - x1*sin, y1 = x0*sin + x1*cos (in qf32).
        HVX_Vector v4 = Q6_Vqf32_vsub_Vqf32Vqf32(vx0_c, vx1_s);
        HVX_Vector v5 = Q6_Vqf32_vadd_Vqf32Vqf32(vx0_s, vx1_c);

        vdst[i/2+0]  = Q6_Vsf_equals_Vqf32(v4);
        vdst[i/2+hv] = Q6_Vsf_equals_Vqf32(v5);
    }

    // Scalar tail for elements not covered by full vector iterations.
    for (uint32_t i = nvec * VLEN_FP32; i < ne; i += 2) {
        const float cos_theta = theta_cache[i+0];
        const float sin_theta = theta_cache[i+1];
        float x0 = src0[i/2];
        float x1 = src0[i/2 + he];
        dst[i/2]      = x0 * cos_theta - x1 * sin_theta;
        dst[i/2 + he] = x0 * sin_theta + x1 * cos_theta;
    }
}

// Standard (interleaved-pair) RoPE for one fp32 row of `ne` rotated elements.
// Rotation pairs are adjacent: (src0[i], src0[i+1]).
// theta_cache holds interleaved (cos, sin) coefficients, two floats per pair.
// The `_aa` suffix presumably means both src0 and dst are HVX-vector aligned
// (they are cast straight to HVX_Vector *) -- TODO confirm against callers.
static inline void hvx_rope_f32_aa(float * restrict dst, const float * restrict src0, uint32_t ne, const float * restrict theta_cache) {
    const HVX_Vector * restrict vsrc   = (const HVX_Vector *) src0;
    const HVX_Vector * restrict vtheta = (const HVX_Vector *) theta_cache;
    HVX_Vector       * restrict vdst   = (HVX_Vector *) dst;

    uint32_t nvec = (ne / (VLEN_FP32 * 2)) * 2; // 2 vecs per loop, step of two

    #pragma unroll(2)
    for (uint32_t i = 0; i < nvec; i+=2) {
        HVX_Vector v0 = vsrc[i+0];
        HVX_Vector v1 = vsrc[i+1];

        HVX_Vector v2 = vtheta[i+0];
        HVX_Vector v3 = vtheta[i+1];

        // Deinterleave even/odd lanes so x0/x1 (and cos/sin) can be operated
        // on as whole vectors.
        HVX_VectorPair vx0_x1   = Q6_W_vdeal_VVR(v1, v0, -4);  // vx0_x1[0] = x0, vx0_x1[1] = x1
        HVX_VectorPair vcos_sin = Q6_W_vdeal_VVR(v3, v2, -4);  // vcos_sin[0] = cos_theta, vcos_sin[1] = sin_theta

        HVX_Vector vx0_c = Q6_Vqf32_vmpy_VsfVsf(Q6_V_lo_W(vx0_x1), Q6_V_lo_W(vcos_sin));
        HVX_Vector vx0_s = Q6_Vqf32_vmpy_VsfVsf(Q6_V_lo_W(vx0_x1), Q6_V_hi_W(vcos_sin));
        HVX_Vector vx1_c = Q6_Vqf32_vmpy_VsfVsf(Q6_V_hi_W(vx0_x1), Q6_V_lo_W(vcos_sin));
        HVX_Vector vx1_s = Q6_Vqf32_vmpy_VsfVsf(Q6_V_hi_W(vx0_x1), Q6_V_hi_W(vcos_sin));

        // Rotation: y0 = x0*cos - x1*sin, y1 = x0*sin + x1*cos (in qf32).
        HVX_Vector v4 = Q6_Vqf32_vsub_Vqf32Vqf32(vx0_c, vx1_s);
        HVX_Vector v5 = Q6_Vqf32_vadd_Vqf32Vqf32(vx0_s, vx1_c);

        // Re-interleave y0/y1 back into adjacent-pair order before storing.
        HVX_VectorPair vstore = Q6_W_vshuff_VVR(Q6_Vsf_equals_Vqf32(v5), Q6_Vsf_equals_Vqf32(v4), -4);

        vdst[i+0] = Q6_V_lo_W(vstore);
        vdst[i+1] = Q6_V_hi_W(vstore);
    }

    // Scalar tail for elements not covered by full vector iterations.
    for (uint32_t i = nvec * VLEN_FP32; i < ne; i += 2) {
        const float cos_theta = theta_cache[i+0];
        const float sin_theta = theta_cache[i+1];
        float x0 = src0[i+0];
        float x1 = src0[i+1];
        dst[i+0] = x0 * cos_theta - x1 * sin_theta;
        dst[i+1] = x0 * sin_theta + x1 * cos_theta;
    }
}

// Apply standard (interleaved-pair) RoPE to `nr` rows staged in scratchpad.
// Rows are laid out with the aligned strides recorded in rctx; channels past
// n_dims are copied through unrotated.
static void inline rope_basic_f32(struct htp_rope_context * rctx, uint8_t * restrict dst, uint8_t * restrict src,
                   uint32_t nr, uint32_t ne0, const float * restrict theta_cache) {
    const size_t   dst_stride = rctx->dst_row_size_aligned;
    const size_t   src_stride = rctx->src0_row_size_aligned;
    const uint32_t n_dims     = rctx->n_dims;

    #pragma unroll(4)
    for (uint32_t r = 0; r < nr; r++) {
        float * drow = (float *) (dst + r * dst_stride);
        float * srow = (float *) (src + r * src_stride);

        hvx_rope_f32_aa(drow, srow, n_dims, theta_cache);

        // Pass the remaining (unrotated) channels through from the source row.
        if (n_dims < ne0) {
            hvx_copy_f32_uu((uint8_t *)(drow + n_dims), (uint8_t *)(srow + n_dims), ne0 - n_dims);
        }
    }
}

// Apply NEOX-style (half-split pair) RoPE to `nr` rows staged in scratchpad.
// Rows are laid out with the aligned strides recorded in rctx; channels past
// n_dims are copied through unrotated.
static void inline rope_neox_f32(struct htp_rope_context * rctx, uint8_t * restrict dst, uint8_t * restrict src,
                   uint32_t nr, uint32_t ne0, const float * restrict theta_cache) {
    const size_t   dst_stride = rctx->dst_row_size_aligned;
    const size_t   src_stride = rctx->src0_row_size_aligned;
    const uint32_t n_dims     = rctx->n_dims;

    #pragma unroll(4)
    for (uint32_t r = 0; r < nr; r++) {
        float * drow = (float *) (dst + r * dst_stride);
        float * srow = (float *) (src + r * src_stride);

        hvx_rope_neox_f32_aa(drow, srow, n_dims, theta_cache);

        // Pass the remaining (unrotated) channels through from the source row.
        if (n_dims < ne0) {
            hvx_copy_f32_uu((uint8_t *)(drow + n_dims), (uint8_t *)(srow + n_dims), ne0 - n_dims);
        }
    }
}

// Worker-pool job: apply fp32 RoPE to this thread's contiguous slice of src0
// rows. Per-thread VTCM is laid out as [theta cache | src row staging]; dst
// rows get their own staging area. DDR<->VTCM DMA is pipelined against HVX
// compute in HTP_ROPE_SPAD_BLOCK-row chunks, with up to HTP_ROPE_SPAD_NROWS
// rows in flight per block.
static void rope_job_f32(unsigned int nth, unsigned int ith, void * data) {
    struct htp_rope_context * rctx = (struct htp_rope_context *) data;
    struct htp_ops_context * octx = rctx->octx;

    const struct htp_tensor * src0 = octx->src[0]; // input rows
    const struct htp_tensor * src1 = octx->src[1]; // positions (int32, indexed by i2)
    const struct htp_tensor * src2 = octx->src[2]; // optional frequency factors
    const struct htp_tensor * dst  = octx->dst;

    // Brings ne*/nb* locals into scope (some unused; see pragmas at file top).
    htp_rope_preamble;

    const uint32_t src0_nrows = rctx->src0_nrows;
    const uint32_t src0_nrows_per_thread = rctx->src0_nrows_per_thread;

    // Static row partitioning: thread `ith` owns [src0_start_row, src0_end_row).
    const uint32_t src0_start_row = src0_nrows_per_thread * ith;
    const uint32_t src0_end_row   = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows);

    // no work for this thread
    if (src0_start_row >= src0_end_row) {
        return;
    }

    uint64_t tt = HAP_perf_get_qtimer_count();

    const int32_t mode    = rctx->mode;
    const bool    is_neox = mode & HTP_ROPE_TYPE_NEOX;

    // VTCM setup: theta cache sits at the start of this thread's src spad,
    // followed (at theta_cache_offset) by the source-row staging area.
    uint8_t * src0_spad_base = octx->src0_spad.data + (ith * octx->src0_spad.size_per_thread);
    float *   theta_cache    = (float *) (src0_spad_base);
              src0_spad_base = src0_spad_base + rctx->theta_cache_offset;
    uint8_t * dst_spad_base  = octx->dst_spad.data + (ith * octx->dst_spad.size_per_thread);

    dma_queue * dma_queue = octx->ctx->dma[ith];
    const int32_t * pos = (const int32_t *) src1->data;
    const float * freq_factors = src2 ? (const float *) src2->data : NULL;

    uint32_t ir = 0;                  // global (flattened) row index
    uint32_t prev_i2 = (uint32_t) -1; // last i2 the theta cache was built for

    for (uint32_t i3 = 0; i3 < ne3; i3++) { // batch
        for (uint32_t i2 = 0; i2 < ne2; i2++) { // seq-len
            for (uint32_t i1 = 0; i1 < ne1; ) { // attn-heads
                // Skip rows owned by lower-numbered threads; stop past our range.
                if (ir < src0_start_row) { ir++; i1++; continue; }
                if (ir >= src0_end_row) goto done;

                // Rows in this block
                const uint32_t nrows = MIN(src0_end_row - ir, ne1 - i1);

                // Depth before prefetch
                uint32_t dma_depth = dma_queue_depth(dma_queue);

                // FARF(HIGH, "rope-block %u: ir %u n-rows %u dma-depth %u : usec %u", ith, ir, nrows, dma_depth,
                //             (unsigned) HAP_perf_qtimer_count_to_us(HAP_perf_get_qtimer_count() - rctx->t_start));

                // Prefetch loop: stage up to HTP_ROPE_SPAD_NROWS source rows,
                // one HTP_ROPE_SPAD_BLOCK chunk per DMA descriptor pair.
                for (uint32_t pnr = 0, pr = 0; pr < nrows && pr < HTP_ROPE_SPAD_NROWS; pr += pnr) {
                    pnr = MIN(nrows - pr, HTP_ROPE_SPAD_BLOCK);

                    uint32_t pi1 = i1 + pr;
                    uint32_t pir = ir + pr;

                    // Dummy DMA transaction for sequencing (interleaving dst,src,dst,...)
                    dma_queue_push_vtcm_to_ddr(dma_queue, dma_make_ptr((void *) dst->data, dst_spad_base + pr * rctx->dst_row_size_aligned), 0, 0, 0);

                    const uint8_t * src_addr = (const uint8_t *) src0->data + i3 * nb03 + i2 * nb02 + pi1 * nb01;
                          uint8_t * src_spad = src0_spad_base + pr * rctx->src0_row_size_aligned;
                    dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(src_spad, src_addr),
                        rctx->src0_row_size_aligned, rctx->src0_row_size, pnr);

                    // FARF(HIGH, "rope-prefetch %u: pr %u i1 %u i2 %u i3 %u src-spad %p src-addr %p pnr %u", ith, pir, pi1, i2, i3, src_spad, src_addr, pnr);
                }

                // Update theta cache: only needed when the position (i2) changes,
                // since the coefficients depend on pos[i2] but not on the head (i1).
                if (i2 != prev_i2) {
                    prev_i2 = i2;

                    const int32_t p = pos[i2];
                    rope_cache_init(p, rctx->freq_scale, freq_factors, rctx->corr_dims, ne0, rctx->ext_factor, rctx->attn_factor, theta_cache, rctx->theta_scale);

                    // FARF(HIGH, "rope-theta %u: ir %u i1 %u i2 %u i3 %u cache %p : usec %u", ith, ir, i1, i2, i3, theta_cache,
                    //         (unsigned) HAP_perf_qtimer_count_to_us(HAP_perf_get_qtimer_count() - rctx->t_start));
                }

                // Skip output DMA transactions from prev block (if any)
                // No need to wait for those here since we're explicitly waiting for the latest prefecthes below.
                for (uint32_t d=0; d < dma_depth; d++) { dma_queue_pop_nowait(dma_queue); }

                // Compute loop: pop one (dst, src) staged chunk at a time,
                // rotate it, queue the writeback, and top up the prefetch window.
                for (uint32_t cnr = 0, cr = 0; cr < nrows; cr += cnr, ir += cnr, i1 += cnr) {
                    // Number of rows to compute
                    cnr = MIN(nrows - cr, HTP_ROPE_SPAD_BLOCK);

                    // Pops pair with the interleaving set up by the prefetch loop:
                    // dummy dst descriptor first, then the src prefetch (waits on it).
                    uint8_t * dst_spad = (uint8_t *) dma_queue_pop(dma_queue).src;
                    uint8_t * src_spad = (uint8_t *) dma_queue_pop(dma_queue).dst;

                    // FARF(HIGH, "rope-compute %u: ir %u i1 %u i2 %u i3 %u src-spad %p cnr %u : usec %u", ith, ir, i1, i2, i3, src_spad, cnr,
                    //         (unsigned) HAP_perf_qtimer_count_to_us(HAP_perf_get_qtimer_count() - rctx->t_start));

                    if (is_neox) {
                        rope_neox_f32(rctx, dst_spad, src_spad, cnr, ne0, theta_cache);
                    } else {
                        rope_basic_f32(rctx, dst_spad, src_spad, cnr, ne0, theta_cache);
                    }

                    // Queue writeback of the computed rows to DDR.
                    uint8_t * dst_addr = (uint8_t *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1;
                    dma_queue_push_vtcm_to_ddr(dma_queue, dma_make_ptr(dst_addr, dst_spad), rctx->dst_row_size, rctx->dst_row_size_aligned, cnr);

                    // Prefetch more rows (if any)
                    if ((cr + HTP_ROPE_SPAD_NROWS) < nrows) {
                        uint32_t pnr = MIN(nrows - (cr + HTP_ROPE_SPAD_NROWS), HTP_ROPE_SPAD_BLOCK);
                        uint32_t pi1 = i1 + HTP_ROPE_SPAD_NROWS;
                        uint32_t pir = ir + HTP_ROPE_SPAD_NROWS;

                        const uint8_t * src_addr = (const uint8_t *) src0->data + i3 * nb03 + i2 * nb02 + pi1 * nb01;
                        dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(src_spad, src_addr),
                            rctx->src0_row_size_aligned, rctx->src0_row_size, pnr);

                        // FARF(HIGH, "rope-prefetch %u: pr %u i1 %u i2 %u i3 %u src-spad %p src-addr %p pnr %u", ith, pir, pi1, i2, i3, src_spad, src_addr, pnr);
                    }
                }
            }
        }
    }

done:
    // Drain all outstanding DMA (including final writebacks) before returning.
    dma_queue_flush(dma_queue);
    tt = HAP_perf_get_qtimer_count() - tt;

    FARF(HIGH, "rope-f32: %d/%d: (%u:%u) usec %u\n", ith, nth, src0_start_row, src0_end_row, (unsigned) HAP_perf_qtimer_count_to_us(tt));
}

// Set up and launch the multi-threaded fp32 ROPE op:
// validates the op code, carves per-thread VTCM scratchpads (theta cache +
// source-row staging + destination-row staging), unpacks the rope parameters
// from op_params, and runs rope_job_f32 on the worker pool.
// Returns HTP_STATUS_OK, HTP_STATUS_NO_SUPPORT, or HTP_STATUS_VTCM_TOO_SMALL.
static int execute_op_rope_f32(struct htp_ops_context * octx) {
    int err = HTP_STATUS_OK;

    const struct htp_tensor * src0 = octx->src[0]; // input activations
    const struct htp_tensor * src1 = octx->src[1]; // positions (int32)
    const struct htp_tensor * src2 = octx->src[2]; // optional frequency factors
    const struct htp_tensor * dst  = octx->dst;

    const char * op_type = "rope-f32";

    switch (octx->op) {
        case HTP_OP_ROPE:
            break;

        default:
            FARF(ERROR, "Unsupported Op %u\n", octx->op);
            return HTP_STATUS_NO_SUPPORT;
    }

    const uint32_t ne0 = dst->ne[0];
    const uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3];
    const uint32_t n_threads = MIN(octx->n_threads, src0_nrows); // never more threads than rows

    const size_t src0_row_size = src0->nb[1];
    const size_t dst_row_size  = dst->nb[1];

    // Aligned row sizes for VTCM
    const size_t src0_row_size_aligned    = hex_round_up(src0_row_size, VLEN);
    const size_t dst_row_size_aligned     = hex_round_up(dst_row_size, VLEN);
    const size_t theta_cache_size_aligned = hex_round_up(src0->ne[0] * sizeof(float), 128);

    // Calculate spad sizes per thread
    size_t src0_spad_per_thread = theta_cache_size_aligned + HTP_ROPE_SPAD_NROWS * src0_row_size_aligned;
    size_t dst_spad_per_thread  = HTP_ROPE_SPAD_NROWS * dst_row_size_aligned;
    size_t spad_per_thread = src0_spad_per_thread + dst_spad_per_thread;

    // Check if we fit in VTCM
    size_t total_vtcm_needed = spad_per_thread * n_threads;
    if (octx->ctx->vtcm_size < total_vtcm_needed) {
        FARF(ERROR, "%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, total_vtcm_needed);
        return HTP_STATUS_VTCM_TOO_SMALL;
    }

    octx->src0_spad.size_per_thread = src0_spad_per_thread;
    octx->dst_spad.size_per_thread  = dst_spad_per_thread;
    octx->src0_spad.size = n_threads * src0_spad_per_thread;
    octx->dst_spad.size  = n_threads * dst_spad_per_thread;
    octx->src1_spad.size = 0;

    // Carve the VTCM reservation: all src0 spads first, then all dst spads.
    octx->src0_spad.data = octx->ctx->vtcm_base;                        octx->src0_spad.src = NULL;
    octx->src1_spad.data = NULL;                                        octx->src1_spad.src = NULL;
    octx->dst_spad.data  = octx->src0_spad.data + octx->src0_spad.size; octx->dst_spad.src  = NULL;

    struct htp_rope_context rctx;
    memset(&rctx, 0, sizeof(struct htp_rope_context));

    rctx.t_start = HAP_perf_get_qtimer_count();

    rctx.octx = octx;

    // Unpack rope parameters; the op_params slot layout appears to mirror
    // ggml's rope operator (index 0 unused here) -- confirm against the host
    // side that packs them.
    const int32_t * op_params = &octx->op_params[0];
    rctx.n_dims     = ((const int32_t *) op_params)[1];
    rctx.mode       = ((const int32_t *) op_params)[2];
    rctx.n_ctx_orig = ((const int32_t *) op_params)[4];

    // Float parameters are bit-copied out of the int32 param array.
    memcpy(&rctx.freq_base,   (int32_t *) op_params + 5,  sizeof(float));
    memcpy(&rctx.freq_scale,  (int32_t *) op_params + 6,  sizeof(float));
    memcpy(&rctx.ext_factor,  (int32_t *) op_params + 7,  sizeof(float));
    memcpy(&rctx.attn_factor, (int32_t *) op_params + 8,  sizeof(float));
    memcpy(&rctx.beta_fast,   (int32_t *) op_params + 9,  sizeof(float));
    memcpy(&rctx.beta_slow,   (int32_t *) op_params + 10, sizeof(float));
    memcpy(&rctx.sections,    (int32_t *) op_params + 11, sizeof(int) * 4);

    // theta_k = freq_base^(-2k/n_dims): per-pair geometric progression factor.
    rctx.theta_scale = powf(rctx.freq_base, -2.0f / rctx.n_dims);

    rope_corr_dims(rctx.n_dims, rctx.n_ctx_orig, rctx.freq_base, rctx.beta_fast, rctx.beta_slow, rctx.corr_dims);

    rctx.src0_row_size = src0_row_size;
    rctx.dst_row_size  = dst_row_size;
    rctx.src0_row_size_aligned = src0_row_size_aligned;
    rctx.dst_row_size_aligned  = dst_row_size_aligned;
    rctx.theta_cache_offset    = theta_cache_size_aligned;

    rctx.src0_nrows = src0_nrows;
    rctx.src0_nrows_per_thread = (src0_nrows + n_threads - 1) / n_threads; // ceil division

    FARF(HIGH, "rope-f32 n-rows %u n-dims %d ne0 %u ext-factor %.6f theta-scale %.6f attn-factor %.6f\n", rctx.src0_nrows, rctx.n_dims, ne0,
         rctx.ext_factor, rctx.theta_scale, rctx.attn_factor);

    if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) {
        worker_pool_run_func(octx->ctx->worker_pool, rope_job_f32, &rctx, n_threads);
    }

    return err;
}

// Entry point for the ROPE op: dispatch on the src0 element type.
// Only F32 is implemented; anything else reports no support.
int op_rope(struct htp_ops_context * octx) {
    if (octx->src[0]->type == HTP_TYPE_F32) {
        return execute_op_rope_f32(octx);
    }

    return HTP_STATUS_NO_SUPPORT;
}