Model Inference (experimental)
Example showing how to use the models inference functionality.
Edit the pipeline
(
    tasks: [
        // NOTE: Modify this block to customize
        (
            id: "cam0",
            type: "crate::cu29::tasks::VideoCapture",
            // Fixed: the original snippet opened `config: {` twice but closed it
            // only once, leaving the braces unbalanced and the RON unparsable.
            config: {
                // URL of the RTSP camera
                "source_type": "rtsp",
                "source_uri": "rtsp://<username>:<password>@<ip>:<port>/<stream>"
            }
        ),
        (
            id: "inference",
            type: "crate::cu29::tasks::Inference",
        ),
        (
            id: "bcast_text",
            type: "crate::cu29::tasks::BroadcastChat",
        ),
        (
            id: "bcast_image",
            type: "crate::cu29::tasks::BroadcastImage",
        ),
    ],
    // Wiring: camera frames feed both the inference task and the image
    // broadcaster; inference results feed the text broadcaster.
    cnx: [
        (src: "cam0", dst: "inference", msg: "crate::cu29::msgs::ImageRgb8Msg"),
        (src: "cam0", dst: "bcast_image", msg: "crate::cu29::msgs::ImageRgb8Msg"),
        (src: "inference", dst: "bcast_text", msg: "crate::cu29::msgs::PromptResponseMsg"),
    ],
    logging: (
        slab_size_mib: 1024, // Preallocates 1GiB of memory map file at a time
        section_size_mib: 100, // Preallocates 100MiB of memory map per section for the main logger.
        enable_task_logging: false,
    ),
)

Start the server
Start the inference
Inference settings
Broadcast
Jpeg encoded images
Model inference results
Visualize streams with inference results

Stop inference
Last updated