trtx-sys 0.3.0

Raw FFI bindings to NVIDIA TensorRT-RTX (EXPERIMENTAL - NOT FOR PRODUCTION)
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
/**
 * Logger Bridge for TensorRT-RTX Rust Bindings
 * 
 * This file provides C wrapper functions for TensorRT-RTX C++ API.
 * While we use autocxx for most C++ bindings, some wrappers are still necessary.
 * 
 * ## Architecture
 * 
 * ```
 * Rust (trtx) → Raw FFI (trtx-sys) → logger_bridge.cpp → TensorRT C++ + autocxx
 * ```
 * 
 * ## Why These Wrappers Exist
 * 
 * ### NECESSARY WRAPPERS (Cannot be removed):
 * 
 * 1. **Logger Bridge (Section 1 below)**: 
 *    - Rust cannot implement C++ virtual classes
 *    - RustLoggerImpl forwards virtual method calls to Rust callbacks
 *    - REQUIRED: No alternative
 * 
 * 2. **Factory Functions (Section 2 below)**:
 *    - createInferBuilder/Runtime take `ILogger&` references
 *    - autocxx struggles with C++ reference parameters
 *    - REQUIRED: Simplest solution for reference params
 * 
 * 3. **CUDA Wrappers** (not in this file; see the CUDA bridge source):
 *    - Bridge between std::ffi::c_void and autocxx::c_void
 *    - Type compatibility issue between Rust and autocxx types
 *    - KEEP FOR NOW: Could be removed with codebase-wide type migration
 * 
 * ### POTENTIALLY REDUNDANT WRAPPERS:
 * 
 * 4. **TensorRT Method Wrappers (Sections 3-6 below)**:
 *    - Builder, Network, Tensor, Engine, Context methods
 *    - autocxx CAN generate these with `generate!("nvinfer1::INetworkDefinition")`
 *    - POTENTIALLY REMOVABLE: ~75% code reduction if refactored
 *    - STATUS: Kept for now due to stability, could be migrated to direct autocxx calls
 * 
 * ## Why Not Full autocxx?
 * 
 * We TRIED to use autocxx for everything but encountered:
 * - Type mismatches (autocxx::c_void vs std::ffi::c_void)
 * - Reference parameter handling issues
 * - Virtual method/callback complications
 * 
 * ## See Also
 * - docs/LOGGER_BRIDGE_ANALYSIS.md - Detailed analysis of each function
 * - docs/REFACTORING_SUMMARY.md - Test results and recommendations
 * - docs/FFI_GUIDE.md - How to modify bindings
 */

#include "logger_bridge.hpp"

#include <NvOnnxParser.h>

#include <cstdint>
#include <cstdlib>  // malloc for serialized-engine buffers (freed by Rust via free())
#include <cstring>
#include <vector>   // std::vector in network_add_concatenation

//==============================================================================
// SECTION 1: LOGGER BRIDGE (NECESSARY - Virtual Methods)
//==============================================================================
// Rust cannot implement C++ virtual classes, so we need this C++ class
// to forward ILogger::log() calls back to Rust via function pointer callbacks.

// C++ implementation of ILogger that bridges to Rust
class RustLoggerImpl : public nvinfer1::ILogger {
public:
    RustLoggerImpl(RustLogCallback callback, void* user_data)
        : callback_(callback), user_data_(user_data) {}

    void log(Severity severity, const char* msg) noexcept override {
        if (callback_) {
            callback_(user_data_, static_cast<int32_t>(severity), msg);
        }
    }

private:
    RustLogCallback callback_;
    void* user_data_;
};

// Opaque struct that holds the logger implementation
// Opaque struct that holds the logger implementation.
// Rust only ever sees this as an opaque pointer and never dereferences it.
// Owns `impl`: both are allocated in create_rust_logger_bridge() and
// released together in destroy_rust_logger_bridge().
struct RustLoggerBridge {
    RustLoggerImpl* impl;
};

extern "C" {

// Allocate a logger bridge wrapping the given Rust callback.
//
// Params:  callback  - Rust function pointer invoked for each log record;
//                      must be non-null or the call fails.
//          user_data - opaque pointer passed back to the callback verbatim.
// Returns: heap-allocated bridge (free with destroy_rust_logger_bridge),
//          or nullptr on null callback / allocation failure.
//
// FIX: the original leaked the RustLoggerBridge allocation if the
// RustLoggerImpl allocation threw (std::bad_alloc); the catch block now
// releases the partially-built bridge before returning nullptr.
RustLoggerBridge* create_rust_logger_bridge(RustLogCallback callback, void* user_data) {
    if (!callback) {
        return nullptr;
    }

    RustLoggerBridge* bridge = nullptr;
    try {
        // Value-initialization zeroes `impl`, so `delete bridge` in the
        // catch path is safe even if the second allocation throws.
        bridge = new RustLoggerBridge();
        bridge->impl = new RustLoggerImpl(callback, user_data);
        return bridge;
    } catch (...) {
        delete bridge;  // no-op if the first allocation was what threw
        return nullptr;
    }
}

// Tear down a bridge created by create_rust_logger_bridge().
// Safe to call with nullptr (no-op). Frees the ILogger implementation
// first, then the bridge container itself.
void destroy_rust_logger_bridge(RustLoggerBridge* logger) {
    if (logger == nullptr) {
        return;
    }
    delete logger->impl;
    delete logger;
}

// Expose the bridge's ILogger* so it can be handed to the factory
// functions below. Returns nullptr for a null bridge.
nvinfer1::ILogger* get_logger_interface(RustLoggerBridge* logger) {
    if (logger == nullptr) {
        return nullptr;
    }
    return logger->impl;
}

//==============================================================================
// SECTION 2: FACTORY FUNCTIONS (NECESSARY - Reference Parameters)
//==============================================================================
// These functions take `ILogger&` references which autocxx struggles with.
// Simpler to keep these thin wrappers than to work around autocxx limitations.

// Factory functions for TensorRT
#ifdef TRTX_LINK_TENSORRT_RTX
// Create an IBuilder bound to the given logger.
// `logger` must be a nvinfer1::ILogger* (e.g. from get_logger_interface).
// Returns the builder as void* for the C ABI, or nullptr on null input
// or exception. Wrapper exists because createInferBuilder takes an
// ILogger& reference, which autocxx handles poorly.
void* create_infer_builder(void* logger) {
    auto* ilogger = static_cast<nvinfer1::ILogger*>(logger);
    if (ilogger == nullptr) {
        return nullptr;
    }
    try {
        return nvinfer1::createInferBuilder(*ilogger);
    } catch (...) {
        return nullptr;
    }
}

// Create an IRuntime bound to the given logger.
// Mirrors create_infer_builder: `logger` is a nvinfer1::ILogger*,
// result is the runtime as void*, nullptr on null input or exception.
void* create_infer_runtime(void* logger) {
    auto* ilogger = static_cast<nvinfer1::ILogger*>(logger);
    if (ilogger == nullptr) {
        return nullptr;
    }
    try {
        return nvinfer1::createInferRuntime(*ilogger);
    } catch (...) {
        return nullptr;
    }
}
#endif

#ifdef TRTX_LINK_TENSORRT_ONNXPARSER
// ONNX Parser factory function
// Create an ONNX parser attached to `network`, logging through `logger`.
// `network` must be an INetworkDefinition*, `logger` an ILogger*; both
// must be non-null. Returns the IParser as void*, or nullptr on failure.
void* create_onnx_parser(void* network, void* logger) {
    auto* inetwork = static_cast<nvinfer1::INetworkDefinition*>(network);
    auto* ilogger = static_cast<nvinfer1::ILogger*>(logger);
    if (inetwork == nullptr || ilogger == nullptr) {
        return nullptr;
    }
    try {
        return nvonnxparser::createParser(*inetwork, *ilogger);
    } catch (...) {
        return nullptr;
    }
}
#endif

//==============================================================================
// SECTION 3: BUILDER & CONFIG METHODS (POTENTIALLY REDUNDANT)
//==============================================================================
// These wrap IBuilder and IBuilderConfig methods.
// autocxx CAN generate these with generate!("nvinfer1::IBuilder").
// FUTURE: Consider migrating to direct autocxx calls (see REFACTORING_SUMMARY.md)

// Builder methods
// Set a memory pool limit on an IBuilderConfig.
// `pool_type` is the integer value of nvinfer1::MemoryPoolType; `limit`
// is in bytes. Best-effort: a null config or an exception is silently
// ignored because the void return gives callers nothing to inspect.
void builder_config_set_memory_pool_limit(void* config, int32_t pool_type, size_t limit) {
    auto* iconfig = static_cast<nvinfer1::IBuilderConfig*>(config);
    if (iconfig == nullptr) {
        return;
    }
    try {
        const auto pool = static_cast<nvinfer1::MemoryPoolType>(pool_type);
        iconfig->setMemoryPoolLimit(pool, limit);
    } catch (...) {
        // Deliberately swallowed: best-effort setter.
    }
}

//==============================================================================
// SECTION 4: NETWORK DEFINITION METHODS (POTENTIALLY REDUNDANT)
//==============================================================================
// These wrap INetworkDefinition layer building methods.
// autocxx CAN generate these with generate!("nvinfer1::INetworkDefinition").
// FUTURE: Consider migrating to direct autocxx calls
// NOTE: This is the largest section (~350 lines) and biggest refactoring opportunity

// Network methods
// network_add_input - REMOVED - Now using direct autocxx call in network.rs

// network_add_convolution - REMOVED - Using direct autocxx

// network_add_activation - REMOVED - Now using direct autocxx call in network.rs

// network_add_pooling - REMOVED - Now using direct autocxx call in network.rs

// network_add_matrix_multiply - REMOVED - Using direct autocxx

// network_add_constant - REMOVED - Using direct autocxx

// network_add_elementwise - REMOVED - Now using direct autocxx call in network.rs

// network_add_shuffle - REMOVED - Now using direct autocxx call in network.rs

void* network_add_concatenation(void* network, void** inputs, int32_t nb_inputs) {
    if (!network || !inputs || nb_inputs <= 0) return nullptr;
    try {
        auto* inetwork = static_cast<nvinfer1::INetworkDefinition*>(network);
        std::vector<nvinfer1::ITensor*> tensors;
        tensors.reserve(nb_inputs);
        for (int32_t i = 0; i < nb_inputs; ++i) {
            tensors.push_back(static_cast<nvinfer1::ITensor*>(inputs[i]));
        }
        auto* layer = inetwork->addConcatenation(tensors.data(), nb_inputs);
        return layer; // Return layer, not output tensor
    } catch (...) {
        return nullptr;
    }
}

// network_add_softmax - REMOVED - Using direct autocxx

// network_add_scale - REMOVED - Using direct autocxx

// network_add_reduce - REMOVED - Using direct autocxx

// network_add_slice - REMOVED - Now using direct autocxx call in network.rs

// network_add_resize - REMOVED - Using direct autocxx

// network_add_topk - REMOVED - Using direct autocxx

// network_add_gather - REMOVED - Using direct autocxx

// network_add_select - REMOVED - Using direct autocxx

// Add an assertion layer on `condition` with an optional message.
// A null `message` is replaced by the empty string. Assertion layers
// have no output tensors, so the IAssertionLayer* itself is returned
// (as void*); nullptr on bad arguments or exception.
void* network_add_assertion(void* network, void* condition, const char* message) {
    if (network == nullptr || condition == nullptr) {
        return nullptr;
    }
    try {
        auto* inetwork = static_cast<nvinfer1::INetworkDefinition*>(network);
        auto& cond = *static_cast<nvinfer1::ITensor*>(condition);
        const char* text = (message != nullptr) ? message : "";
        return inetwork->addAssertion(cond, text);
    } catch (...) {
        return nullptr;
    }
}

// Add a loop container to the network.
// Returns the ILoop* (as void*), or nullptr on null input / exception.
void* network_add_loop(void* network) {
    auto* inetwork = static_cast<nvinfer1::INetworkDefinition*>(network);
    if (inetwork == nullptr) {
        return nullptr;
    }
    try {
        return inetwork->addLoop();
    } catch (...) {
        return nullptr;
    }
}

// Add an if-conditional container to the network.
// Returns the IIfConditional* (as void*), or nullptr on failure.
void* network_add_if_conditional(void* network) {
    auto* inetwork = static_cast<nvinfer1::INetworkDefinition*>(network);
    if (inetwork == nullptr) {
        return nullptr;
    }
    try {
        return inetwork->addIfConditional();
    } catch (...) {
        return nullptr;
    }
}

//==============================================================================
// SECTION 5: TENSOR METHODS (POTENTIALLY REDUNDANT)
//==============================================================================
// Wrap ITensor getter/setter methods.
// autocxx CAN generate with generate!("nvinfer1::ITensor")

// Tensor methods
void* tensor_get_dimensions(void* tensor, int32_t* dims, int32_t* nb_dims) {
    if (!tensor || !dims || !nb_dims) return nullptr;
    try {
        auto* itensor = static_cast<nvinfer1::ITensor*>(tensor);
        nvinfer1::Dims dimensions = itensor->getDimensions();
        *nb_dims = dimensions.nbDims;
        for (int32_t i = 0; i < dimensions.nbDims && i < nvinfer1::Dims::MAX_DIMS; ++i) {
            dims[i] = dimensions.d[i];
        }
        return tensor; // Return success
    } catch (...) {
        return nullptr;
    }
}

// Return the tensor's DataType as its underlying integer value,
// or -1 on null input / exception.
int32_t tensor_get_type(void* tensor) {
    auto* itensor = static_cast<nvinfer1::ITensor*>(tensor);
    if (itensor == nullptr) {
        return -1;
    }
    try {
        return static_cast<int32_t>(itensor->getType());
    } catch (...) {
        return -1;
    }
}

// Build and serialize the network into a freshly malloc'd buffer.
//
// Params:  builder/network/config - IBuilder*/INetworkDefinition*/
//                                   IBuilderConfig*, all non-null.
//          out_size               - receives the buffer size in bytes.
// Returns: malloc'd copy of the serialized engine (caller releases it
//          with free(), e.g. from Rust), or nullptr on failure.
//
// FIX: the original wrote *out_size before malloc, so a failed malloc
// returned nullptr paired with a stale non-zero size. *out_size is now
// zeroed first and only set once the copy has actually succeeded, and
// the IHostMemory is always released on the OOM path.
void* builder_build_serialized_network(void* builder, void* network, void* config, size_t* out_size) {
    if (!builder || !network || !config || !out_size) return nullptr;
    *out_size = 0;
    try {
        auto* ibuilder = static_cast<nvinfer1::IBuilder*>(builder);
        auto* inetwork = static_cast<nvinfer1::INetworkDefinition*>(network);
        auto* iconfig = static_cast<nvinfer1::IBuilderConfig*>(config);

        // buildSerializedNetwork returns an IHostMemory* that owns the
        // blob; copy it into a plain malloc'd buffer so the Rust side
        // can free() it without touching TensorRT types.
        auto* serialized = ibuilder->buildSerializedNetwork(*inetwork, *iconfig);
        if (!serialized) return nullptr;

        const size_t size = serialized->size();
        void* data = malloc(size);
        if (!data) {
            delete serialized;  // don't leak the host memory on OOM
            return nullptr;
        }
        memcpy(data, serialized->data(), size);
        delete serialized;
        *out_size = size;
        return data;
    } catch (...) {
        return nullptr;  // *out_size is already 0
    }
}

// Runtime methods
// Deserialize an engine blob of `size` bytes through an IRuntime.
// Returns the ICudaEngine* (as void*), or nullptr on null arguments
// or exception.
void* runtime_deserialize_cuda_engine(void* runtime, const void* data, size_t size) {
    if (runtime == nullptr || data == nullptr) {
        return nullptr;
    }
    auto* iruntime = static_cast<nvinfer1::IRuntime*>(runtime);
    try {
        return iruntime->deserializeCudaEngine(data, size);
    } catch (...) {
        return nullptr;
    }
}

// Engine methods
// ExecutionContext methods
// Parser methods
// Parse a serialized ONNX model held in memory.
// Returns true on success, false on null arguments or parse failure
// (use parser_get_nb_errors/parser_get_error for details).
bool parser_parse(void* parser, const void* data, size_t size) {
    if (parser == nullptr || data == nullptr) {
        return false;
    }
    auto* iparser = static_cast<nvonnxparser::IParser*>(parser);
    try {
        return iparser->parse(data, size);
    } catch (...) {
        return false;
    }
}

// Number of errors recorded by the last parse.
// Returns 0 for a null parser or on exception (indistinguishable from
// "no errors" — acceptable for this best-effort C ABI).
int32_t parser_get_nb_errors(void* parser) {
    auto* iparser = static_cast<nvonnxparser::IParser*>(parser);
    if (iparser == nullptr) {
        return 0;
    }
    try {
        return iparser->getNbErrors();
    } catch (...) {
        return 0;
    }
}

void* parser_get_error(void* parser, int32_t index) {
    if (!parser) return nullptr;
    try {
        auto* iparser = static_cast<nvonnxparser::IParser*>(parser);
        return const_cast<nvonnxparser::IParserError*>(iparser->getError(index));
    } catch (...) {
        return nullptr;
    }
}

const char* parser_error_desc(void* error) {
    if (!error) return nullptr;
    try {
        auto* ierror = static_cast<nvonnxparser::IParserError*>(error);
        return ierror->desc();
    } catch (...) {
        return nullptr;
    }
}

//==============================================================================
// SECTION 6: DESTRUCTION METHODS (POTENTIALLY REDUNDANT)
//==============================================================================
// These wrap TensorRT object deletion.
// autocxx CAN handle C++ destructors with RAII wrappers.
// FUTURE: Consider using UniquePtr or Drop trait implementations

// Destruction methods
// Destroy an IBuilder created by create_infer_builder().
// Accepts nullptr: delete on a null pointer is a no-op, so no guard needed.
void delete_builder(void* builder) {
    delete static_cast<nvinfer1::IBuilder*>(builder);
}

// Destroy an INetworkDefinition. Null-safe (delete nullptr is a no-op).
void delete_network(void* network) {
    delete static_cast<nvinfer1::INetworkDefinition*>(network);
}

// Destroy an IBuilderConfig. Null-safe (delete nullptr is a no-op).
void delete_config(void* config) {
    delete static_cast<nvinfer1::IBuilderConfig*>(config);
}

// Destroy an IRuntime created by create_infer_runtime(). Null-safe.
void delete_runtime(void* runtime) {
    delete static_cast<nvinfer1::IRuntime*>(runtime);
}

// Destroy an ICudaEngine (e.g. from runtime_deserialize_cuda_engine).
// Null-safe (delete nullptr is a no-op).
void delete_engine(void* engine) {
    delete static_cast<nvinfer1::ICudaEngine*>(engine);
}

// Destroy an IExecutionContext. Null-safe (delete nullptr is a no-op).
void delete_context(void* context) {
    delete static_cast<nvinfer1::IExecutionContext*>(context);
}

// Destroy an ONNX IParser created by create_onnx_parser(). Null-safe.
void delete_parser(void* parser) {
    delete static_cast<nvonnxparser::IParser*>(parser);
}

// Report the TensorRT version this wrapper was compiled against.
// NV_TENSORRT_VERSION is a compile-time macro from the TensorRT headers
// (an encoded major/minor/patch integer — exact encoding varies by SDK
// release; TODO confirm for the RTX SDK in use), so this reflects the
// build-time headers, not the runtime library.
uint32_t get_tensorrt_version() {
    return NV_TENSORRT_VERSION;
}

} // extern "C"